-rw-r--r--.mailmap1
-rw-r--r--Documentation/IRQ-domain.txt8
-rw-r--r--Documentation/RCU/stallwarn.txt7
-rw-r--r--Documentation/RCU/torture.txt39
-rw-r--r--Documentation/RCU/trace.txt32
-rw-r--r--Documentation/RCU/whatisRCU.txt6
-rw-r--r--Documentation/arm/OMAP/README7
-rw-r--r--Documentation/arm/SA1100/Victor16
-rw-r--r--Documentation/arm/memory.txt2
-rw-r--r--Documentation/arm/uefi.txt2
-rw-r--r--Documentation/arm64/booting.txt11
-rw-r--r--Documentation/atomic_ops.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/twd.txt5
-rw-r--r--Documentation/devicetree/bindings/edac/apm-xgene-edac.txt23
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-msm.txt26
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca953x.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-zynq.txt9
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt41
-rw-r--r--Documentation/devicetree/bindings/gpio/netxbig-gpio-ext.txt22
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt1
-rw-r--r--Documentation/devicetree/bindings/leds/leds-aat1290.txt8
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm6328.txt10
-rw-r--r--Documentation/devicetree/bindings/leds/leds-netxbig.txt92
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.txt11
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,mmcif.txt5
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt13
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt25
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt1
-rw-r--r--Documentation/devicetree/bindings/net/smsc-lan87xx.txt24
-rw-r--r--Documentation/devicetree/bindings/pci/pci-msi.txt220
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt90
-rw-r--r--Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt16
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt63
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt1
-rw-r--r--Documentation/edac.txt46
-rw-r--r--Documentation/features/vm/THP/arch-support.txt2
-rw-r--r--Documentation/features/vm/pte_special/arch-support.txt2
-rw-r--r--Documentation/filesystems/proc.txt5
-rw-r--r--Documentation/gpio/driver.txt80
-rw-r--r--Documentation/hwmon/lm755
-rw-r--r--Documentation/hwmon/max3179037
-rw-r--r--Documentation/kernel-parameters.txt24
-rw-r--r--Documentation/locking/lockstat.txt2
-rw-r--r--Documentation/locking/locktorture.txt3
-rw-r--r--Documentation/memory-barriers.txt65
-rw-r--r--Documentation/mmc/mmc-dev-attrs.txt10
-rw-r--r--MAINTAINERS52
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/atomic.h8
-rw-r--r--arch/arc/Kconfig40
-rw-r--r--arch/arc/boot/dts/axc001.dtsi2
-rw-r--r--arch/arc/boot/dts/axc003.dtsi2
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi2
-rw-r--r--arch/arc/boot/dts/nsim_hs.dts12
-rw-r--r--arch/arc/boot/dts/skeleton.dtsi2
-rw-r--r--arch/arc/boot/dts/vdk_axc003.dtsi2
-rw-r--r--arch/arc/boot/dts/vdk_axc003_idu.dtsi2
-rw-r--r--arch/arc/configs/axs101_defconfig1
-rw-r--r--arch/arc/configs/axs103_defconfig1
-rw-r--r--arch/arc/configs/axs103_smp_defconfig1
-rw-r--r--arch/arc/include/asm/arcregs.h6
-rw-r--r--arch/arc/include/asm/atomic.h8
-rw-r--r--arch/arc/include/asm/cache.h2
-rw-r--r--arch/arc/include/asm/cacheflush.h8
-rw-r--r--arch/arc/include/asm/entry-compact.h13
-rw-r--r--arch/arc/include/asm/highmem.h61
-rw-r--r--arch/arc/include/asm/hugepage.h81
-rw-r--r--arch/arc/include/asm/irq.h1
-rw-r--r--arch/arc/include/asm/irqflags-compact.h16
-rw-r--r--arch/arc/include/asm/kmap_types.h18
-rw-r--r--arch/arc/include/asm/mach_desc.h10
-rw-r--r--arch/arc/include/asm/mcip.h3
-rw-r--r--arch/arc/include/asm/mmu.h7
-rw-r--r--arch/arc/include/asm/page.h9
-rw-r--r--arch/arc/include/asm/pgalloc.h12
-rw-r--r--arch/arc/include/asm/pgtable.h88
-rw-r--r--arch/arc/include/asm/processor.h7
-rw-r--r--arch/arc/include/asm/setup.h7
-rw-r--r--arch/arc/include/asm/smp.h7
-rw-r--r--arch/arc/include/asm/tlbflush.h5
-rw-r--r--arch/arc/include/uapi/asm/page.h11
-rw-r--r--arch/arc/kernel/entry-arcv2.S2
-rw-r--r--arch/arc/kernel/entry-compact.S43
-rw-r--r--arch/arc/kernel/head.S49
-rw-r--r--arch/arc/kernel/intc-compact.c90
-rw-r--r--arch/arc/kernel/irq.c20
-rw-r--r--arch/arc/kernel/mcip.c46
-rw-r--r--arch/arc/kernel/setup.c7
-rw-r--r--arch/arc/kernel/smp.c66
-rw-r--r--arch/arc/kernel/time.c3
-rw-r--r--arch/arc/kernel/vmlinux.lds.S2
-rw-r--r--arch/arc/mm/Makefile1
-rw-r--r--arch/arc/mm/cache.c91
-rw-r--r--arch/arc/mm/fault.c13
-rw-r--r--arch/arc/mm/highmem.c140
-rw-r--r--arch/arc/mm/init.c104
-rw-r--r--arch/arc/mm/tlb.c234
-rw-r--r--arch/arc/mm/tlbex.S51
-rw-r--r--arch/arc/plat-axs10x/axs10x.c8
-rw-r--r--arch/arc/plat-sim/platform.c4
-rw-r--r--arch/arm/Kconfig9
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts3
-rw-r--r--arch/arm/boot/dts/armada-385-db-ap.dts2
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi6
-rw-r--r--arch/arm/boot/dts/emev2-kzm9d.dts8
-rw-r--r--arch/arm/boot/dts/exynos5420-peach-pit.dts5
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts5
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi4
-rw-r--r--arch/arm/boot/dts/kirkwood-net5big.dts60
-rw-r--r--arch/arm/boot/dts/kirkwood-netxbig.dtsi80
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts2
-rw-r--r--arch/arm/boot/dts/meson.dtsi23
-rw-r--r--arch/arm/boot/dts/omap3-evm-37xx.dts2
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi7
-rw-r--r--arch/arm/boot/dts/rk3288-veyron.dtsi6
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi20
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi14
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts10
-rw-r--r--arch/arm/boot/dts/tegra114.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra124.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts2
-rw-r--r--arch/arm/configs/exynos_defconfig1
-rw-r--r--arch/arm/configs/hisi_defconfig1
-rw-r--r--arch/arm/configs/lpc18xx_defconfig1
-rw-r--r--arch/arm/include/asm/arch_gicv3.h188
-rw-r--r--arch/arm/include/asm/atomic.h12
-rw-r--r--arch/arm/include/asm/cmpxchg.h12
-rw-r--r--arch/arm/include/asm/irqflags.h10
-rw-r--r--arch/arm/include/asm/mach/arch.h2
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/smp.h4
-rw-r--r--arch/arm/include/asm/unistd.h7
-rw-r--r--arch/arm/kernel/devtree.c12
-rw-r--r--arch/arm/kernel/entry-armv.S33
-rw-r--r--arch/arm/kernel/hw_breakpoint.c1
-rw-r--r--arch/arm/kernel/kgdb.c31
-rw-r--r--arch/arm/kernel/smp.c12
-rw-r--r--arch/arm/kernel/smp_twd.c11
-rw-r--r--arch/arm/kernel/traps.c52
-rw-r--r--arch/arm/kvm/Kconfig1
-rw-r--r--arch/arm/kvm/arm.c2
-rw-r--r--arch/arm/lib/clear_user.S4
-rw-r--r--arch/arm/mach-exynos/pm_domains.c8
-rw-r--r--arch/arm/mach-exynos/suspend.c55
-rw-r--r--arch/arm/mach-imx/gpc.c55
-rw-r--r--arch/arm/mach-mvebu/Kconfig7
-rw-r--r--arch/arm/mach-mvebu/Makefile1
-rw-r--r--arch/arm/mach-mvebu/board.h21
-rw-r--r--arch/arm/mach-mvebu/kirkwood.c4
-rw-r--r--arch/arm/mach-mvebu/netxbig.c191
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/board-generic.c10
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c55
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c9
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c9
-rw-r--r--arch/arm/mm/Kconfig12
-rw-r--r--arch/arm/mm/dma-mapping.c7
-rw-r--r--arch/arm/mm/fault.c22
-rw-r--r--arch/arm/mm/fault.h1
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm/plat-orion/common.c2
-rw-r--r--arch/arm/vdso/vdsomunge.c17
-rw-r--r--arch/arm64/Kconfig27
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi10
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi12
-rw-r--r--arch/arm64/include/asm/acpi.h5
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h170
-rw-r--r--arch/arm64/include/asm/atomic.h2
-rw-r--r--arch/arm64/include/asm/cpufeature.h3
-rw-r--r--arch/arm64/include/asm/cputype.h17
-rw-r--r--arch/arm64/include/asm/memory.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/kernel/acpi.c29
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c16
-rw-r--r--arch/arm64/kernel/cpu_errata.c9
-rw-r--r--arch/arm64/kernel/cpufeature.c19
-rw-r--r--arch/arm64/kernel/efi-stub.c14
-rw-r--r--arch/arm64/kernel/efi.c23
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/stacktrace.c6
-rw-r--r--arch/arm64/kernel/suspend.c22
-rw-r--r--arch/arm64/kvm/Kconfig4
-rw-r--r--arch/arm64/mm/proc.S4
-rw-r--r--arch/avr32/boards/atngw100/mrmt.c1
-rw-r--r--arch/avr32/include/asm/atomic.h4
-rw-r--r--arch/c6x/platforms/megamod-pic.c2
-rw-r--r--arch/cris/Kconfig1
-rw-r--r--arch/cris/arch-v10/kernel/head.S106
-rw-r--r--arch/cris/arch-v10/kernel/kgdb.c89
-rw-r--r--arch/cris/arch-v10/mm/init.c14
-rw-r--r--arch/cris/arch-v32/Kconfig89
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig167
-rw-r--r--arch/cris/arch-v32/drivers/Makefile1
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c40
-rw-r--r--arch/cris/arch-v32/drivers/i2c.c751
-rw-r--r--arch/cris/arch-v32/drivers/i2c.h16
-rw-r--r--arch/cris/arch-v32/drivers/mach-a3/Makefile1
-rw-r--r--arch/cris/arch-v32/drivers/mach-a3/gpio.c999
-rw-r--r--arch/cris/arch-v32/drivers/mach-fs/Makefile1
-rw-r--r--arch/cris/arch-v32/drivers/mach-fs/gpio.c978
-rw-r--r--arch/cris/arch-v32/kernel/crisksyms.c3
-rw-r--r--arch/cris/arch-v32/kernel/debugport.c2
-rw-r--r--arch/cris/arch-v32/kernel/head.S4
-rw-r--r--arch/cris/arch-v32/kernel/irq.c6
-rw-r--r--arch/cris/arch-v32/kernel/kgdb.c96
-rw-r--r--arch/cris/arch-v32/kernel/setup.c8
-rw-r--r--arch/cris/arch-v32/mach-a3/Makefile2
-rw-r--r--arch/cris/arch-v32/mach-a3/io.c149
-rw-r--r--arch/cris/arch-v32/mach-fs/Kconfig19
-rw-r--r--arch/cris/arch-v32/mach-fs/Makefile2
-rw-r--r--arch/cris/arch-v32/mach-fs/io.c191
-rw-r--r--arch/cris/boot/dts/artpec3.dtsi46
-rw-r--r--arch/cris/boot/dts/dev88.dts49
-rw-r--r--arch/cris/boot/dts/etraxfs.dtsi8
l---------arch/cris/boot/dts/include/dt-bindings1
-rw-r--r--arch/cris/boot/dts/p1343.dts76
-rw-r--r--arch/cris/boot/rescue/head_v10.S3
-rw-r--r--arch/cris/include/arch-v32/arch/io.h140
-rw-r--r--arch/cris/include/arch-v32/arch/irq.h2
-rw-r--r--arch/cris/include/asm/eshlibld.h3
-rw-r--r--arch/cris/include/asm/io.h2
-rw-r--r--arch/cris/include/uapi/asm/etraxgpio.h157
-rw-r--r--arch/cris/kernel/crisksyms.c2
-rw-r--r--arch/cris/kernel/time.c25
-rw-r--r--arch/frv/include/asm/atomic.h4
-rw-r--r--arch/h8300/include/asm/atomic.h4
-rw-r--r--arch/hexagon/include/asm/atomic.h2
-rw-r--r--arch/ia64/include/asm/atomic.h8
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/m32r/include/asm/atomic.h4
-rw-r--r--arch/m68k/include/asm/atomic.h4
-rw-r--r--arch/m68k/sun3/idprom.c5
-rw-r--r--arch/metag/include/asm/atomic_lnkget.h2
-rw-r--r--arch/metag/include/asm/atomic_lock1.h2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c4
-rw-r--r--arch/mips/configs/pistachio_defconfig1
-rw-r--r--arch/mips/include/asm/atomic.h8
-rw-r--r--arch/mips/mti-sead3/Makefile2
-rw-r--r--arch/mn10300/include/asm/atomic.h4
-rw-r--r--arch/parisc/include/asm/atomic.h2
-rw-r--r--arch/powerpc/include/asm/cache.h7
-rw-r--r--arch/powerpc/include/asm/kvm_host.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h17
-rw-r--r--arch/powerpc/include/asm/reg.h1
-rw-r--r--arch/powerpc/kernel/dma.c2
-rw-r--r--arch/powerpc/kernel/rtas.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv.c55
-rw-r--r--arch/powerpc/perf/core-book3s.c36
-rw-r--r--arch/powerpc/perf/hv-24x7.c166
-rw-r--r--arch/powerpc/perf/power8-pmu.c3
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c2
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c9
-rw-r--r--arch/powerpc/platforms/pasemi/msi.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c2
-rw-r--r--arch/powerpc/platforms/powernv/smp.c29
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c3
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/powerpc/sysdev/i8259.c3
-rw-r--r--arch/powerpc/sysdev/ipic.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c3
-rw-r--r--arch/powerpc/sysdev/mpic_msi.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c3
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c35
-rw-r--r--arch/sh/include/asm/atomic.h4
-rw-r--r--arch/sparc/include/asm/atomic_64.h8
-rw-r--r--arch/sparc/kernel/perf_event.c32
-rw-r--r--arch/tile/include/asm/atomic.h2
-rw-r--r--arch/tile/include/asm/atomic_64.h6
-rw-r--r--arch/um/Makefile4
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/um/os-Linux/helper.c6
-rw-r--r--arch/x86/Kconfig71
-rw-r--r--arch/x86/Makefile10
-rw-r--r--arch/x86/boot/compressed/eboot.c32
-rw-r--r--arch/x86/boot/header.S2
-rw-r--r--arch/x86/entry/common.c264
-rw-r--r--arch/x86/entry/entry_32.S182
-rw-r--r--arch/x86/entry/entry_64.S9
-rw-r--r--arch/x86/entry/entry_64_compat.S547
-rw-r--r--arch/x86/entry/syscall_32.c9
-rw-r--r--arch/x86/entry/syscall_64.c4
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl12
-rw-r--r--arch/x86/entry/vdso/Makefile39
-rw-r--r--arch/x86/entry/vdso/vdso2c.c2
-rw-r--r--arch/x86/entry/vdso/vdso32-setup.c28
-rw-r--r--arch/x86/entry/vdso/vdso32/int80.S56
-rw-r--r--arch/x86/entry/vdso/vdso32/syscall.S75
-rw-r--r--arch/x86/entry/vdso/vdso32/sysenter.S116
-rw-r--r--arch/x86/entry/vdso/vdso32/system_call.S57
-rw-r--r--arch/x86/entry/vdso/vma.c13
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c9
-rw-r--r--arch/x86/ia32/ia32_signal.c4
-rw-r--r--arch/x86/include/asm/acpi.h23
-rw-r--r--arch/x86/include/asm/amd_nb.h2
-rw-r--r--arch/x86/include/asm/apic.h110
-rw-r--r--arch/x86/include/asm/atomic.h4
-rw-r--r--arch/x86/include/asm/atomic64_64.h4
-rw-r--r--arch/x86/include/asm/dwarf2.h84
-rw-r--r--arch/x86/include/asm/efi.h1
-rw-r--r--arch/x86/include/asm/elf.h10
-rw-r--r--arch/x86/include/asm/hpet.h6
-rw-r--r--arch/x86/include/asm/kdebug.h6
-rw-r--r--arch/x86/include/asm/mce.h34
-rw-r--r--arch/x86/include/asm/microcode.h26
-rw-r--r--arch/x86/include/asm/microcode_amd.h3
-rw-r--r--arch/x86/include/asm/microcode_intel.h10
-rw-r--r--arch/x86/include/asm/numachip/numachip.h1
-rw-r--r--arch/x86/include/asm/numachip/numachip_csr.h153
-rw-r--r--arch/x86/include/asm/preempt.h5
-rw-r--r--arch/x86/include/asm/processor.h4
-rw-r--r--arch/x86/include/asm/string_64.h5
-rw-r--r--arch/x86/include/asm/switch_to.h12
-rw-r--r--arch/x86/include/asm/syscall.h14
-rw-r--r--arch/x86/include/asm/thread_info.h3
-rw-r--r--arch/x86/include/asm/uaccess.h14
-rw-r--r--arch/x86/include/asm/vdso.h10
-rw-r--r--arch/x86/include/uapi/asm/mce.h2
-rw-r--r--arch/x86/kernel/apic/apic.c8
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c220
-rw-r--r--arch/x86/kernel/apic/io_apic.c10
-rw-r--r--arch/x86/kernel/asm-offsets.c3
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c8
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c34
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c8
-rw-r--r--arch/x86/kernel/cpu/microcode/Makefile3
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c509
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_early.c440
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c232
-rw-r--r--arch/x86/kernel/cpu/microcode/core_early.c170
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c791
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_early.c808
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_lib.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event.c39
-rw-r--r--arch/x86/kernel/cpu/perf_event.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_bts.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cstate.c694
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c40
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_pt.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c61
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c16
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c34
-rw-r--r--arch/x86/kernel/crash.c3
-rw-r--r--arch/x86/kernel/e820.c2
-rw-r--r--arch/x86/kernel/early-quirks.c2
-rw-r--r--arch/x86/kernel/early_printk.c6
-rw-r--r--arch/x86/kernel/head_32.S5
-rw-r--r--arch/x86/kernel/hpet.c29
-rw-r--r--arch/x86/kernel/irq_64.c2
-rw-r--r--arch/x86/kernel/pci-dma.c5
-rw-r--r--arch/x86/kernel/process.c9
-rw-r--r--arch/x86/kernel/process_32.c8
-rw-r--r--arch/x86/kernel/process_64.c10
-rw-r--r--arch/x86/kernel/quirks.c2
-rw-r--r--arch/x86/kernel/setup.c114
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kernel/smpboot.c15
-rw-r--r--arch/x86/kernel/tsc.c35
-rw-r--r--arch/x86/lib/x86-opcode-map.txt24
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/pageattr.c9
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c9
-rw-r--r--arch/x86/platform/efi/efi.c28
-rw-r--r--arch/x86/ras/Kconfig4
-rw-r--r--arch/x86/ras/mce_amd_inj.c103
-rw-r--r--arch/x86/um/asm/syscall.h4
-rw-r--r--arch/x86/um/ldt.c5
-rw-r--r--arch/x86/um/sys_call_table_32.c7
-rw-r--r--arch/x86/um/sys_call_table_64.c7
-rw-r--r--arch/x86/xen/setup.c13
-rw-r--r--arch/xtensa/include/asm/atomic.h4
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-lib.c31
-rw-r--r--block/blk-mq-tag.c1
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/blk-sysfs.c1
-rw-r--r--crypto/ablkcipher.c2
-rw-r--r--crypto/algapi.c2
-rw-r--r--crypto/api.c6
-rw-r--r--crypto/crypto_user.c2
-rw-r--r--drivers/acpi/apei/ghes.c10
-rw-r--r--drivers/acpi/gsi.c54
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/dma-contiguous.c2
-rw-r--r--drivers/base/pinctrl.c15
-rw-r--r--drivers/base/platform-msi.c6
-rw-r--r--drivers/base/regmap/internal.h3
-rw-r--r--drivers/base/regmap/regmap-debugfs.c23
-rw-r--r--drivers/base/regmap/regmap-irq.c43
-rw-r--r--drivers/base/regmap/regmap.c41
-rw-r--r--drivers/block/nbd.c36
-rw-r--r--drivers/block/nvme-core.c24
-rw-r--r--drivers/block/rbd.c72
-rw-r--r--drivers/block/xen-blkfront.c3
-rw-r--r--drivers/bus/arm-ccn.c5
-rw-r--r--drivers/clk/clkdev.c3
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c54
-rw-r--r--drivers/clocksource/Kconfig4
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/arm_global_timer.c9
-rw-r--r--drivers/clocksource/em_sti.c2
-rw-r--r--drivers/clocksource/exynos_mct.c12
-rw-r--r--drivers/clocksource/fsl_ftm_timer.c2
-rw-r--r--drivers/clocksource/h8300_timer16.c1
-rw-r--r--drivers/clocksource/h8300_timer8.c1
-rw-r--r--drivers/clocksource/h8300_tpu.c1
-rw-r--r--drivers/clocksource/mtk_timer.c26
-rw-r--r--drivers/clocksource/numachip.c95
-rw-r--r--drivers/clocksource/samsung_pwm_timer.c2
-rw-r--r--drivers/clocksource/sh_cmt.c1
-rw-r--r--drivers/clocksource/sh_mtu2.c4
-rw-r--r--drivers/clocksource/tango_xtal.c66
-rw-r--r--drivers/clocksource/time-armada-370-xp.c14
-rw-r--r--drivers/clocksource/time-pistachio.c3
-rw-r--r--drivers/clocksource/timer-digicolor.c2
-rw-r--r--drivers/clocksource/timer-imx-gpt.c3
-rw-r--r--drivers/clocksource/timer-prima2.c2
-rw-r--r--drivers/clocksource/vf_pit_timer.c2
-rw-r--r--drivers/edac/Makefile2
-rw-r--r--drivers/edac/altera_edac.c20
-rw-r--r--drivers/edac/altera_edac.h5
-rw-r--r--drivers/edac/amd64_edac.c41
-rw-r--r--drivers/edac/amd64_edac.h58
-rw-r--r--drivers/edac/debugfs.c163
-rw-r--r--drivers/edac/edac_core.h2
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c150
-rw-r--r--drivers/edac/edac_module.h34
-rw-r--r--drivers/edac/ghes_edac.c24
-rw-r--r--drivers/edac/i5100_edac.c37
-rw-r--r--drivers/edac/ppc4xx_edac.c1
-rw-r--r--drivers/edac/sb_edac.c8
-rw-r--r--drivers/edac/xgene_edac.c1193
-rw-r--r--drivers/firmware/efi/Kconfig22
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/efi-pstore.c1
-rw-r--r--drivers/firmware/efi/efi.c107
-rw-r--r--drivers/firmware/efi/esrt.c19
-rw-r--r--drivers/firmware/efi/fake_mem.c238
-rw-r--r--drivers/gpio/Kconfig196
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/gpio-104-idio-16.c216
-rw-r--r--drivers/gpio/gpio-altera.c15
-rw-r--r--drivers/gpio/gpio-amdpt.c261
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-ath79.c119
-rw-r--r--drivers/gpio/gpio-etraxfs.c23
-rw-r--r--drivers/gpio/gpio-generic.c58
-rw-r--r--drivers/gpio/gpio-it87.c411
-rw-r--r--drivers/gpio/gpio-it8761e.c230
-rw-r--r--drivers/gpio/gpio-lpc18xx.c14
-rw-r--r--drivers/gpio/gpio-max730x.c1
-rw-r--r--drivers/gpio/gpio-moxart.c14
-rw-r--r--drivers/gpio/gpio-msm-v2.c453
-rw-r--r--drivers/gpio/gpio-mvebu.c14
-rw-r--r--drivers/gpio/gpio-omap.c82
-rw-r--r--drivers/gpio/gpio-pca953x.c45
-rw-r--r--drivers/gpio/gpio-pl061.c112
-rw-r--r--drivers/gpio/gpio-sodaville.c2
-rw-r--r--drivers/gpio/gpio-sx150x.c31
-rw-r--r--drivers/gpio/gpio-tb10x.c14
-rw-r--r--drivers/gpio/gpio-tz1090-pdc.c14
-rw-r--r--drivers/gpio/gpio-vf610.c43
-rw-r--r--drivers/gpio/gpio-xlp.c21
-rw-r--r--drivers/gpio/gpio-zx.c28
-rw-r--r--drivers/gpio/gpio-zynq.c32
-rw-r--r--drivers/gpio/gpiolib-acpi.c21
-rw-r--r--drivers/gpio/gpiolib-legacy.c8
-rw-r--r--drivers/gpio/gpiolib-of.c20
-rw-r--r--drivers/gpio/gpiolib.c175
-rw-r--r--drivers/gpio/gpiolib.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c3
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c120
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c43
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c34
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abx500.c2
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/hwmon/fam15h_power.c87
-rw-r--r--drivers/hwmon/ibmpowernv.c7
-rw-r--r--drivers/hwmon/ina2xx.c243
-rw-r--r--drivers/hwmon/lm75.c7
-rw-r--r--drivers/hwmon/max31790.c603
-rw-r--r--drivers/hwmon/nct6775.c101
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-pnx.c10
-rw-r--r--drivers/iio/accel/st_accel_core.c6
-rw-r--r--drivers/iio/adc/twl4030-madc.c34
-rw-r--r--drivers/infiniband/core/cache.c2
-rw-r--r--drivers/infiniband/core/cm.c10
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c35
-rw-r--r--drivers/infiniband/core/ucma.c7
-rw-r--r--drivers/input/mouse/alps.c48
-rw-r--r--drivers/input/touchscreen/Kconfig1
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c4
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd_iommu_v2.c7
-rw-r--r--drivers/iommu/intel-iommu.c12
-rw-r--r--drivers/irqchip/Kconfig6
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/alphascale_asm9260-icoll.h109
-rw-r--r--drivers/irqchip/exynos-combiner.c2
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c1
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c62
-rw-r--r--drivers/irqchip/irq-crossbar.c62
-rw-r--r--drivers/irqchip/irq-gic-common.c11
-rw-r--r--drivers/irqchip/irq-gic-common.h9
-rw-r--r--drivers/irqchip/irq-gic-v2m.c163
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c7
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c21
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c83
-rw-r--r--drivers/irqchip/irq-gic-v3.c161
-rw-r--r--drivers/irqchip/irq-gic.c110
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-i8259.c4
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c64
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c49
-rw-r--r--drivers/irqchip/irq-mxs.c171
-rw-r--r--drivers/irqchip/irq-nvic.c18
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c6
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c86
-rw-r--r--drivers/irqchip/irq-s3c24xx.c4
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c22
-rw-r--r--drivers/irqchip/irq-tegra.c56
-rw-r--r--drivers/irqchip/irq-vf610-mscm-ir.c45
-rw-r--r--drivers/isdn/hisax/isdnl2.c20
-rw-r--r--drivers/isdn/mISDN/layer2.c54
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class.c69
-rw-r--r--drivers/leds/led-core.c73
-rw-r--r--drivers/leds/leds-88pm860x.c1
-rw-r--r--drivers/leds/leds-bcm6328.c45
-rw-r--r--drivers/leds/leds-bcm6358.c4
-rw-r--r--drivers/leds/leds-cobalt-qube.c23
-rw-r--r--drivers/leds/leds-gpio.c13
-rw-r--r--drivers/leds/leds-hp6xx.c17
-rw-r--r--drivers/leds/leds-ipaq-micro.c27
-rw-r--r--drivers/leds/leds-locomo.c15
-rw-r--r--drivers/leds/leds-menf21bmc.c26
-rw-r--r--drivers/leds/leds-net48xx.c9
-rw-r--r--drivers/leds/leds-netxbig.c336
-rw-r--r--drivers/leds/leds-ot200.c21
-rw-r--r--drivers/leds/leds-powernv.c8
-rw-r--r--drivers/leds/leds-sead3.c (renamed from arch/mips/mti-sead3/leds-sead3.c)1
-rw-r--r--drivers/leds/leds-wrap.c28
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c47
-rw-r--r--drivers/md/dm-cache-metadata.c2
-rw-r--r--drivers/md/md.c3
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c17
-rw-r--r--drivers/md/persistent-data/dm-btree.c2
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c39
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/media/dvb-frontends/horus3a.h4
-rw-r--r--drivers/media/dvb-frontends/lnbh25.h2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c73
-rw-r--r--drivers/media/dvb-frontends/si2168.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_spi.c12
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c7
-rw-r--r--drivers/media/rc/ir-hix5hd2.c2
-rw-r--r--drivers/media/tuners/si2157.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c15
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h2
-rw-r--r--drivers/media/v4l2-core/Kconfig2
-rw-r--r--drivers/memory/Kconfig12
-rw-r--r--drivers/memory/omap-gpmc.c2
-rw-r--r--drivers/mmc/card/block.c199
-rw-r--r--drivers/mmc/card/mmc_test.c9
-rw-r--r--drivers/mmc/core/Kconfig10
-rw-r--r--drivers/mmc/core/core.c291
-rw-r--r--drivers/mmc/core/core.h3
-rw-r--r--drivers/mmc/core/debugfs.c30
-rw-r--r--drivers/mmc/core/host.c247
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/core/mmc_ops.c9
-rw-r--r--drivers/mmc/core/mmc_ops.h3
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c8
-rw-r--r--drivers/mmc/core/pwrseq_simple.c45
-rw-r--r--drivers/mmc/core/quirks.c18
-rw-r--r--drivers/mmc/core/sd.c27
-rw-r--r--drivers/mmc/core/sdio.c27
-rw-r--r--drivers/mmc/core/sdio_irq.c14
-rw-r--r--drivers/mmc/core/sdio_ops.h7
-rw-r--r--drivers/mmc/host/Kconfig21
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c4
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c2
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c162
-rw-r--r--drivers/mmc/host/dw_mmc.c290
-rw-r--r--drivers/mmc/host/dw_mmc.h13
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mmc/host/moxart-mmc.c1
-rw-r--r--drivers/mmc/host/mtk-sd.c306
-rw-r--r--drivers/mmc/host/omap.c1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c11
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-msm.c2
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c463
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c (renamed from drivers/mmc/host/sdhci-pci.c)62
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.h2
-rw-r--r--drivers/mmc/host/sdhci-pci.h7
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c6
-rw-r--r--drivers/mmc/host/sdhci-sirf.c5
-rw-r--r--drivers/mmc/host/sdhci.c29
-rw-r--r--drivers/mmc/host/sunxi-mmc.c8
-rw-r--r--drivers/mmc/host/vub300.c6
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c33
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c42
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c8
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c24
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c14
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c30
-rw-r--r--drivers/net/ethernet/ti/cpsw.c15
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/geneve.c40
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83848.c99
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c2
-rw-r--r--drivers/net/phy/mdio-mux.c1
-rw-r--r--drivers/net/phy/micrel.c23
-rw-r--r--drivers/net/phy/smsc.c19
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c4
-rw-r--r--drivers/net/vxlan.c41
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1
-rw-r--r--drivers/net/xen-netfront.c14
-rw-r--r--drivers/of/irq.c185
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/msi.c63
-rw-r--r--drivers/pci/of.c13
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/probe.c43
-rw-r--r--drivers/perf/arm_pmu.c10
-rw-r--r--drivers/phy/phy-rcar-gen2.c3
-rw-r--r--drivers/pinctrl/Kconfig13
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c131
-rw-r--r--drivers/pinctrl/berlin/Kconfig16
-rw-r--r--drivers/pinctrl/berlin/Makefile1
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2.c4
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2cd.c66
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2q.c4
-rw-r--r--drivers/pinctrl/berlin/berlin-bg4ct.c503
-rw-r--r--drivers/pinctrl/berlin/berlin.c28
-rw-r--r--drivers/pinctrl/berlin/berlin.h6
-rw-r--r--drivers/pinctrl/core.c32
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c36
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c30
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c2
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c1065
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c14
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c122
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c18
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c18
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c22
-rw-r--r--drivers/pinctrl/pinconf-generic.c35
-rw-r--r--drivers/pinctrl/pinconf.c13
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c14
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c14
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c1094
-rw-r--r--drivers/pinctrl/pinctrl-at91.c31
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c22
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c14
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c14
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c31
-rw-r--r--drivers/pinctrl/pinctrl-st.c14
-rw-r--r--drivers/pinctrl/pinctrl-tegra-xusb.c9
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c4
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c4
-rw-r--r--drivers/pinctrl/pinctrl-xway.c18
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c14
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig5
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c16
-rw-r--r--drivers/pinctrl/sh-pfc/core.h3
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c37
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-emev2.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a73a4.c124
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c68
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c266
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c425
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c1062
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c1103
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c1035
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c2816
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c68
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c782
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c4
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h93
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c1828
-rw-r--r--drivers/pinctrl/sunxi/Kconfig4
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c24
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c603
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c22
-rw-r--r--drivers/pinctrl/uniphier/Kconfig14
-rw-r--r--drivers/pinctrl/uniphier/Makefile2
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c5
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c5
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c11
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c5
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c5
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-proxstream2.c5
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c10
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c14
-rw-r--r--drivers/pps/kapi.c4
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/scsi_dh.c8
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/spmi/spmi-pmic-arb.c2
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c2
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c9
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/tty/serial/8250/8250_dma.c4
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-ring.c30
-rw-r--r--drivers/usb/musb/omap2430.c29
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.c1
-rw-r--r--drivers/vhost/vhost.h7
-rw-r--r--drivers/video/console/fbcon.c1
-rw-r--r--drivers/video/fbdev/efifb.c24
-rw-r--r--fs/btrfs/file.c2
-rw-r--r--fs/btrfs/ioctl.c5
-rw-r--r--fs/file.c42
-rw-r--r--fs/fs-writeback.c35
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c3
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c2
-rw-r--r--fs/overlayfs/copy_up.c6
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/super.c2
-rw-r--r--fs/proc/array.c16
-rw-r--r--fs/proc/base.c9
-rw-r--r--fs/proc/meminfo.c7
-rw-r--r--include/asm-generic/atomic-long.h55
-rw-r--r--include/asm-generic/atomic.h4
-rw-r--r--include/asm-generic/mutex-dec.h8
-rw-r--r--include/asm-generic/mutex-xchg.h10
-rw-r--r--include/asm-generic/pgtable.h37
-rw-r--r--include/asm-generic/preempt.h2
-rw-r--r--include/asm-generic/qrwlock_types.h4
-rw-r--r--include/asm-generic/rwsem.h21
-rw-r--r--include/dt-bindings/gpio/gpio.h12
-rw-r--r--include/dt-bindings/leds/leds-netxbig.h18
-rw-r--r--include/kvm/arm_vgic.h4
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/atomic.h100
-rw-r--r--include/linux/backing-dev-defs.h3
-rw-r--r--include/linux/backing-dev.h69
-rw-r--r--include/linux/blk-cgroup.h4
-rw-r--r--include/linux/cma.h2
-rw-r--r--include/linux/compiler-gcc.h13
-rw-r--r--include/linux/compiler.h82
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/dma-contiguous.h4
-rw-r--r--include/linux/edac.h2
-rw-r--r--include/linux/efi.h25
-rw-r--r--include/linux/fdtable.h2
-rw-r--r--include/linux/fwnode.h1
-rw-r--r--include/linux/gpio/consumer.h1
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/init_task.h3
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/irq.h23
-rw-r--r--include/linux/irqchip/arm-gic-v3.h105
-rw-r--r--include/linux/irqchip/arm-gic.h9
-rw-r--r--include/linux/irqdomain.h106
-rw-r--r--include/linux/irqreturn.h2
-rw-r--r--include/linux/list.h5
-rw-r--r--include/linux/list_bl.h5
-rw-r--r--include/linux/list_nulls.h3
-rw-r--r--include/linux/memcontrol.h8
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/core.h4
-rw-r--r--include/linux/mmc/dw_mmc.h23
-rw-r--r--include/linux/mmc/host.h39
-rw-r--r--include/linux/msi.h16
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/of_irq.h23
-rw-r--r--include/linux/omap-dma.h2
-rw-r--r--include/linux/percpu-rwsem.h3
-rw-r--r--include/linux/perf_event.h120
-rw-r--r--include/linux/pinctrl/devinfo.h10
-rw-r--r--include/linux/pinctrl/pinconf-generic.h64
-rw-r--r--include/linux/pinctrl/pinctrl-state.h8
-rw-r--r--include/linux/platform_data/leds-kirkwood-netxbig.h1
-rw-r--r--include/linux/pps_kernel.h16
-rw-r--r--include/linux/preempt.h20
-rw-r--r--include/linux/rcu_sync.h86
-rw-r--r--include/linux/rculist.h5
-rw-r--r--include/linux/rcupdate.h59
-rw-r--r--include/linux/rcutiny.h3
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regmap.h11
-rw-r--r--include/linux/sched.h55
-rw-r--r--include/linux/sched/deadline.h5
-rw-r--r--include/linux/smpboot.h4
-rw-r--r--include/linux/srcu.h5
-rw-r--r--include/linux/stop_machine.h2
-rw-r--r--include/linux/timekeeping.h4
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/vmalloc.h12
-rw-r--r--include/net/dst_metadata.h32
-rw-r--r--include/sound/soc.h6
-rw-r--r--include/sound/wm8904.h2
-rw-r--r--include/trace/events/sched.h22
-rw-r--r--include/uapi/linux/mmc/ioctl.h19
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/perf_event.h6
-rw-r--r--include/uapi/linux/screen_info.h5
-rw-r--r--kernel/cpu.c23
-rw-r--r--kernel/events/core.c224
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/futex.c13
-rw-r--r--kernel/irq/Kconfig4
-rw-r--r--kernel/irq/Makefile1
-rw-r--r--kernel/irq/chip.c28
-rw-r--r--kernel/irq/cpuhotplug.c82
-rw-r--r--kernel/irq/handle.c7
-rw-r--r--kernel/irq/internals.h4
-rw-r--r--kernel/irq/irqdomain.c177
-rw-r--r--kernel/irq/manage.c221
-rw-r--r--kernel/irq/msi.c8
-rw-r--r--kernel/irq/proc.c2
-rw-r--r--kernel/irq/settings.h12
-rw-r--r--kernel/kexec_core.c6
-rw-r--r--kernel/kmod.c8
-rw-r--r--kernel/locking/locktorture.c164
-rw-r--r--kernel/locking/mcs_spinlock.h4
-rw-r--r--kernel/locking/mutex.c9
-rw-r--r--kernel/locking/osq_lock.c11
-rw-r--r--kernel/locking/percpu-rwsem.c90
-rw-r--r--kernel/locking/qrwlock.c8
-rw-r--r--kernel/locking/qspinlock_paravirt.h6
-rw-r--r--kernel/locking/rtmutex.c33
-rw-r--r--kernel/locking/rwsem-xadd.c5
-rw-r--r--kernel/memremap.c14
-rw-r--r--kernel/module.c8
-rw-r--r--kernel/rcu/Makefile2
-rw-r--r--kernel/rcu/rcutorture.c16
-rw-r--r--kernel/rcu/srcu.c4
-rw-r--r--kernel/rcu/sync.c223
-rw-r--r--kernel/rcu/tiny.c8
-rw-r--r--kernel/rcu/tree.c512
-rw-r--r--kernel/rcu/tree.h69
-rw-r--r--kernel/rcu/tree_plugin.h437
-rw-r--r--kernel/rcu/tree_trace.c10
-rw-r--r--kernel/rcu/update.c2
-rw-r--r--kernel/sched/core.c216
-rw-r--r--kernel/sched/cpudeadline.c5
-rw-r--r--kernel/sched/cpudeadline.h1
-rw-r--r--kernel/sched/deadline.c17
-rw-r--r--kernel/sched/fair.c428
-rw-r--r--kernel/sched/features.h21
-rw-r--r--kernel/sched/idle.c2
-rw-r--r--kernel/sched/rt.c22
-rw-r--r--kernel/sched/sched.h55
-rw-r--r--kernel/smpboot.c5
-rw-r--r--kernel/stop_machine.c90
-rw-r--r--kernel/time/clocksource.c7
-rw-r--r--kernel/time/hrtimer.c2
-rw-r--r--kernel/time/ntp.c16
-rw-r--r--kernel/time/ntp_internal.h2
-rw-r--r--kernel/time/posix-cpu-timers.c63
-rw-r--r--kernel/time/timeconst.bc2
-rw-r--r--kernel/time/timekeeping.c18
-rw-r--r--kernel/time/timer.c13
-rw-r--r--kernel/torture.c1
-rw-r--r--kernel/trace/ftrace.c2
-rw-r--r--kernel/trace/trace_sched_switch.c3
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/trace/trace_stack.c11
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/fault-inject.c2
-rw-r--r--lib/nmi_backtrace.c11
-rw-r--r--mm/backing-dev.c36
-rw-r--r--mm/cma.c4
-rw-r--r--mm/filemap.c9
-rw-r--r--mm/huge_memory.c5
-rw-r--r--mm/memcontrol.c35
-rw-r--r--mm/page-writeback.c54
-rw-r--r--mm/pgtable-generic.c100
-rw-r--r--mm/vmalloc.c47
-rw-r--r--net/core/dev.c27
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/gre_offload.c3
-rw-r--r--net/ipv4/ip_gre.c46
-rw-r--r--net/ipv4/netfilter/Kconfig1
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c4
-rw-r--r--net/ipv4/tcp_dctcp.c2
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv6/fib6_rules.c19
-rw-r--r--net/ipv6/ip6_fib.c12
-rw-r--r--net/ipv6/ip6_output.c5
-rw-r--r--net/ipv6/netfilter/Kconfig1
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c1
-rw-r--r--net/ipv6/route.c9
-rw-r--r--net/ipv6/xfrm6_output.c18
-rw-r--r--net/ipv6/xfrm6_policy.c6
-rw-r--r--net/irda/irlmp.c2
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/netfilter/core.c2
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c2
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/actions.c13
-rw-r--r--net/openvswitch/conntrack.c48
-rw-r--r--net/openvswitch/conntrack.h17
-rw-r--r--net/openvswitch/datapath.c5
-rw-r--r--net/openvswitch/datapath.h1
-rw-r--r--net/openvswitch/flow_netlink.c23
-rw-r--r--net/openvswitch/flow_netlink.h6
-rw-r--r--net/openvswitch/vport-geneve.c13
-rw-r--r--net/openvswitch/vport-gre.c8
-rw-r--r--net/openvswitch/vport-internal_dev.c46
-rw-r--r--net/openvswitch/vport-vxlan.c19
-rw-r--r--net/openvswitch/vport.c58
-rw-r--r--net/openvswitch/vport.h35
-rw-r--r--net/rds/tcp_recv.c11
-rw-r--r--net/sysctl_net.c6
-rw-r--r--net/tipc/bcast.c8
-rw-r--r--net/tipc/msg.c12
-rw-r--r--net/tipc/udp_media.c5
-rw-r--r--net/vmw_vsock/af_vsock.c7
-rw-r--r--net/vmw_vsock/vmci_transport.c173
-rw-r--r--net/vmw_vsock/vmci_transport.h4
-rw-r--r--net/xfrm/xfrm_user.c4
-rw-r--r--samples/bpf/bpf_helpers.h12
-rw-r--r--sound/hda/ext/hdac_ext_bus.c1
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/soc/codecs/rt298.c26
-rw-r--r--sound/soc/codecs/wm8962.c2
-rw-r--r--sound/soc/soc-ops.c28
-rw-r--r--tools/build/.gitignore1
-rw-r--r--tools/build/Build1
-rw-r--r--tools/build/Build.include17
-rw-r--r--tools/build/Documentation/Build.txt52
-rw-r--r--tools/build/Makefile43
-rw-r--r--tools/build/Makefile.build7
-rw-r--r--tools/build/Makefile.feature15
-rw-r--r--tools/build/Makefile.include10
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/fixdep.c168
-rw-r--r--tools/build/tests/ex/Build1
-rw-r--r--tools/build/tests/ex/Makefile13
-rw-r--r--tools/build/tests/ex/ex.c2
-rw-r--r--tools/build/tests/ex/inc.c8
-rwxr-xr-xtools/build/tests/run.sh27
-rw-r--r--tools/include/linux/compiler.h32
-rw-r--r--tools/include/linux/err.h49
-rw-r--r--tools/include/linux/filter.h231
-rw-r--r--tools/lib/api/Build1
-rw-r--r--tools/lib/api/Makefile6
-rw-r--r--tools/lib/api/cpu.c18
-rw-r--r--tools/lib/api/cpu.h6
-rw-r--r--tools/lib/api/fs/Build4
-rw-r--r--tools/lib/api/fs/debugfs.c129
-rw-r--r--tools/lib/api/fs/debugfs.h23
-rw-r--r--tools/lib/api/fs/findfs.c63
-rw-r--r--tools/lib/api/fs/findfs.h23
-rw-r--r--tools/lib/api/fs/fs.c165
-rw-r--r--tools/lib/api/fs/fs.h30
-rw-r--r--tools/lib/api/fs/tracefs.c78
-rw-r--r--tools/lib/api/fs/tracefs.h21
-rw-r--r--tools/lib/api/fs/tracing_path.c135
-rw-r--r--tools/lib/api/fs/tracing_path.h16
-rw-r--r--tools/lib/bpf/Makefile11
-rw-r--r--tools/lib/lockdep/Makefile6
-rw-r--r--tools/lib/symbol/kallsyms.c6
-rw-r--r--tools/lib/symbol/kallsyms.h4
-rw-r--r--tools/lib/traceevent/event-parse.c60
-rw-r--r--tools/lib/traceevent/event-parse.h1
-rw-r--r--tools/lib/traceevent/plugin_kvm.c25
-rw-r--r--tools/perf/Documentation/intel-pt.txt44
-rw-r--r--tools/perf/Documentation/itrace.txt4
-rw-r--r--tools/perf/Documentation/perf-bench.txt54
-rw-r--r--tools/perf/Documentation/perf-inject.txt3
-rw-r--r--tools/perf/Documentation/perf-list.txt3
-rw-r--r--tools/perf/Documentation/perf-record.txt16
-rw-r--r--tools/perf/Documentation/perf-report.txt48
-rw-r--r--tools/perf/Documentation/perf-script.txt17
-rw-r--r--tools/perf/Documentation/perf-stat.txt5
-rw-r--r--tools/perf/Documentation/perf-top.txt5
-rw-r--r--tools/perf/Documentation/perf.txt8
-rw-r--r--tools/perf/MANIFEST5
-rw-r--r--tools/perf/Makefile.perf53
-rw-r--r--tools/perf/arch/common.c10
-rw-r--r--tools/perf/arch/common.h4
-rw-r--r--tools/perf/arch/x86/Build2
-rw-r--r--tools/perf/arch/x86/Makefile1
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h19
-rw-r--r--tools/perf/arch/x86/tests/Build10
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c34
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/x86/tests/gen-insn-x86-dat.awk75
-rwxr-xr-xtools/perf/arch/x86/tests/gen-insn-x86-dat.sh43
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-32.c658
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-64.c768
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-src.c877
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c185
-rw-r--r--tools/perf/arch/x86/tests/intel-cqm.c124
-rw-r--r--tools/perf/arch/x86/tests/perf-time-to-tsc.c (renamed from tools/perf/tests/perf-time-to-tsc.c)4
-rw-r--r--tools/perf/arch/x86/tests/rdpmc.c (renamed from tools/perf/tests/rdpmc.c)7
-rw-r--r--tools/perf/arch/x86/util/dwarf-regs.c122
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c55
-rw-r--r--tools/perf/bench/Build2
-rw-r--r--tools/perf/bench/mem-functions.c379
-rw-r--r--tools/perf/bench/mem-memcpy.c434
-rw-r--r--tools/perf/bench/numa.c4
-rw-r--r--tools/perf/bench/sched-messaging.c10
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-bench.c14
-rw-r--r--tools/perf/builtin-evlist.c4
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-inject.c127
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-kvm.c1
-rw-r--r--tools/perf/builtin-list.c20
-rw-r--r--tools/perf/builtin-probe.c147
-rw-r--r--tools/perf/builtin-record.c56
-rw-r--r--tools/perf/builtin-report.c65
-rw-r--r--tools/perf/builtin-sched.c4
-rw-r--r--tools/perf/builtin-script.c113
-rw-r--r--tools/perf/builtin-stat.c93
-rw-r--r--tools/perf/builtin-top.c59
-rw-r--r--tools/perf/builtin-trace.c31
-rw-r--r--tools/perf/config/Makefile23
-rw-r--r--tools/perf/perf.c30
-rwxr-xr-xtools/perf/python/twatch.py23
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py221
-rw-r--r--tools/perf/tests/Build4
-rw-r--r--tools/perf/tests/bpf-script-example.c44
-rw-r--r--tools/perf/tests/builtin-test.c76
-rw-r--r--tools/perf/tests/code-reading.c76
-rw-r--r--tools/perf/tests/dwarf-unwind.c4
-rw-r--r--tools/perf/tests/evsel-tp-sched.c10
-rw-r--r--tools/perf/tests/hists_filter.c55
-rw-r--r--tools/perf/tests/make4
-rw-r--r--tools/perf/tests/mmap-basic.c3
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c13
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c5
-rw-r--r--tools/perf/tests/openat-syscall.c13
-rw-r--r--tools/perf/tests/parse-events.c49
-rw-r--r--tools/perf/tests/tests.h10
-rw-r--r--tools/perf/tests/topology.c115
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c4
-rw-r--r--tools/perf/trace/strace/groups/file2
-rw-r--r--tools/perf/ui/browser.c14
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/ui/browsers/annotate.c14
-rw-r--r--tools/perf/ui/browsers/hists.c120
-rw-r--r--tools/perf/ui/browsers/map.c2
-rw-r--r--tools/perf/ui/browsers/scripts.c2
-rw-r--r--tools/perf/ui/hist.c16
-rw-r--r--tools/perf/ui/tui/setup.c8
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/annotate.c5
-rw-r--r--tools/perf/util/annotate.h2
-rw-r--r--tools/perf/util/auxtrace.c24
-rw-r--r--tools/perf/util/auxtrace.h4
-rw-r--r--tools/perf/util/bpf-loader.c352
-rw-r--r--tools/perf/util/bpf-loader.h85
-rw-r--r--tools/perf/util/callchain.c42
-rw-r--r--tools/perf/util/callchain.h26
-rw-r--r--tools/perf/util/cpumap.c97
-rw-r--r--tools/perf/util/cpumap.h10
-rw-r--r--tools/perf/util/env.c86
-rw-r--r--tools/perf/util/env.h44
-rw-r--r--tools/perf/util/event.c22
-rw-r--r--tools/perf/util/event.h6
-rw-r--r--tools/perf/util/evlist.c47
-rw-r--r--tools/perf/util/evlist.h5
-rw-r--r--tools/perf/util/evsel.c50
-rw-r--r--tools/perf/util/evsel.h10
-rw-r--r--tools/perf/util/header.c136
-rw-r--r--tools/perf/util/header.h27
-rw-r--r--tools/perf/util/hist.c59
-rw-r--r--tools/perf/util/hist.h8
-rw-r--r--tools/perf/util/include/dwarf-regs.h8
-rw-r--r--tools/perf/util/intel-pt-decoder/Build13
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c21
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.h38
-rw-r--r--tools/perf/util/intel-pt-decoder/x86-opcode-map.txt24
-rw-r--r--tools/perf/util/intel-pt.c266
-rw-r--r--tools/perf/util/machine.c29
-rw-r--r--tools/perf/util/machine.h9
-rw-r--r--tools/perf/util/map.c21
-rw-r--r--tools/perf/util/map.h2
-rw-r--r--tools/perf/util/parse-branch-options.c1
-rw-r--r--tools/perf/util/parse-events.c374
-rw-r--r--tools/perf/util/parse-events.h16
-rw-r--r--tools/perf/util/parse-events.l12
-rw-r--r--tools/perf/util/parse-events.y82
-rw-r--r--tools/perf/util/parse-options.c156
-rw-r--r--tools/perf/util/parse-options.h5
-rw-r--r--tools/perf/util/pmu.c42
-rw-r--r--tools/perf/util/probe-event.c234
-rw-r--r--tools/perf/util/probe-event.h11
-rw-r--r--tools/perf/util/probe-file.c56
-rw-r--r--tools/perf/util/probe-file.h4
-rw-r--r--tools/perf/util/probe-finder.c58
-rw-r--r--tools/perf/util/python.c59
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c3
-rw-r--r--tools/perf/util/session.c37
-rw-r--r--tools/perf/util/session.h2
-rw-r--r--tools/perf/util/sort.c74
-rw-r--r--tools/perf/util/sort.h6
-rw-r--r--tools/perf/util/srcline.c29
-rw-r--r--tools/perf/util/stat.c13
-rw-r--r--tools/perf/util/stat.h3
-rw-r--r--tools/perf/util/strbuf.c22
-rw-r--r--tools/perf/util/strbuf.h2
-rw-r--r--tools/perf/util/symbol-minimal.c2
-rw-r--r--tools/perf/util/symbol.c31
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/trace-event-info.c2
-rw-r--r--tools/perf/util/trace-event.c16
-rw-r--r--tools/perf/util/trace-event.h2
-rw-r--r--tools/perf/util/unwind-libunwind.c19
-rw-r--r--tools/perf/util/usage.c5
-rw-r--r--tools/perf/util/util.c72
-rw-r--r--tools/perf/util/util.h12
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh6
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/CFLIST4
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK056
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK066
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK06.boot1
-rw-r--r--tools/testing/selftests/timers/Makefile3
-rw-r--r--tools/testing/selftests/timers/adjtick.c221
-rw-r--r--tools/testing/selftests/x86/Makefile6
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c4
-rw-r--r--tools/testing/selftests/x86/ptrace_syscall.c294
-rw-r--r--tools/testing/selftests/x86/raw_syscall_helper_32.S46
-rw-r--r--tools/testing/selftests/x86/test_syscall_vdso.c401
-rw-r--r--tools/testing/selftests/x86/thunks_32.S55
-rw-r--r--tools/testing/selftests/x86/unwind_vdso.c211
-rw-r--r--tools/vm/page-types.c6
-rw-r--r--virt/kvm/arm/arch_timer.c19
-rw-r--r--virt/kvm/arm/vgic.c99
1225 files changed, 40765 insertions, 19734 deletions
diff --git a/.mailmap b/.mailmap
index 4b31af54ccd5..b1e9a97653dc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -59,6 +59,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 3a8e15cba816..8d990bde8693 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -32,9 +32,9 @@ top of the irq_alloc_desc*() API. An irq_domain to manage mapping is
 preferred over interrupt controller drivers open coding their own
 reverse mapping scheme.
 
-irq_domain also implements translation from Device Tree interrupt
-specifiers to hwirq numbers, and can be easily extended to support
-other IRQ topology data sources.
+irq_domain also implements translation from an abstract irq_fwspec
+structure to hwirq numbers (Device Tree and ACPI GSI so far), and can
+be easily extended to support other IRQ topology data sources.
 
 === irq_domain usage ===
 An interrupt controller driver creates and registers an irq_domain by
@@ -184,7 +184,7 @@ There are four major interfaces to use hierarchy irq_domain:
    related resources associated with these interrupts.
 3) irq_domain_activate_irq(): activate interrupt controller hardware to
    deliver the interrupt.
-3) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
+4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
    to stop delivering the interrupt.
 
 Following changes are needed to support hierarchy irq_domain.
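
Editorial sketch of the registration path the updated irq_domain text describes (not part of the patch above): a driver creates a linear domain and lets it reverse-map hardware IRQ numbers to Linux virqs. The controller init function, MY_NR_IRQS, and the bare ops table below are hypothetical placeholders; only irq_domain_add_linear(), irq_create_mapping(), and irq_domain_xlate_onecell() are real kernel APIs.

/*
 * Minimal sketch of irq_domain usage, assuming a hypothetical interrupt
 * controller with 32 inputs and a one-cell interrupt specifier.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define MY_NR_IRQS	32

/* Stock one-cell translator; a real driver would also provide .map */
static const struct irq_domain_ops my_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
};

static struct irq_domain *my_domain;

static int my_intc_init(struct device_node *np)
{
	unsigned int virq;

	/* Register a linear (array-based) reverse map for MY_NR_IRQS inputs */
	my_domain = irq_domain_add_linear(np, MY_NR_IRQS, &my_domain_ops, NULL);
	if (!my_domain)
		return -ENOMEM;

	/* Map hwirq 3 to a dynamically allocated Linux virq */
	virq = irq_create_mapping(my_domain, 3);
	return virq ? 0 : -EINVAL;
}
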
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index efb9454875ab..0f7fb4298e7e 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -205,6 +205,13 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
 	behavior, you might need to replace some of the cond_resched()
 	calls with calls to cond_resched_rcu_qs().
 
+o	Booting Linux using a console connection that is too slow to
+	keep up with the boot-time console-message rate.  For example,
+	a 115Kbaud serial console can be -way- too slow to keep up
+	with boot-time message rates, and will frequently result in
+	RCU CPU stall warning messages.  Especially if you have added
+	debug printk()s.
+
 o	Anything that prevents RCU's grace-period kthreads from running.
 	This can result in the "All QSes seen" console-log message.
 	This message will include information on when the kthread last
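
Editorial sketch, not part of the patch above: the context lines mention replacing cond_resched() with cond_resched_rcu_qs() in long-running kernel loops so RCU sees quiescent states. In the sketch below, my_work_pending() and do_my_work() are hypothetical placeholders for a driver's own work loop; cond_resched_rcu_qs() is the real kernel helper.

#include <linux/rcupdate.h>
#include <linux/sched.h>

extern bool my_work_pending(void);	/* hypothetical */
extern void do_my_work(void);		/* hypothetical */

static void my_long_running_loop(void)
{
	while (my_work_pending()) {
		do_my_work();
		/* Report a quiescent state so RCU does not flag a stall */
		cond_resched_rcu_qs();
	}
}
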
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index dac02a6219b1..118e7c176ce7 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -166,40 +166,27 @@ test_no_idle_hz Whether or not to test the ability of RCU to operate in
166 166
167torture_type The type of RCU to test, with string values as follows: 167torture_type The type of RCU to test, with string values as follows:
168 168
169 "rcu": rcu_read_lock(), rcu_read_unlock() and call_rcu(). 169 "rcu": rcu_read_lock(), rcu_read_unlock() and call_rcu(),
170 170 along with expedited, synchronous, and polling
171 "rcu_sync": rcu_read_lock(), rcu_read_unlock(), and 171 variants.
172 synchronize_rcu().
173
174 "rcu_expedited": rcu_read_lock(), rcu_read_unlock(), and
175 synchronize_rcu_expedited().
176 172
177 "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and 173 "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and
178 call_rcu_bh(). 174 call_rcu_bh(), along with expedited and synchronous
179 175 variants.
180 "rcu_bh_sync": rcu_read_lock_bh(), rcu_read_unlock_bh(),
181 and synchronize_rcu_bh().
182 176
183 "rcu_bh_expedited": rcu_read_lock_bh(), rcu_read_unlock_bh(), 177 "rcu_busted": This tests an intentionally incorrect version
184 and synchronize_rcu_bh_expedited(). 178 of RCU in order to help test rcutorture itself.
185 179
186 "srcu": srcu_read_lock(), srcu_read_unlock() and 180 "srcu": srcu_read_lock(), srcu_read_unlock() and
187 call_srcu(). 181 call_srcu(), along with expedited and
188 182 synchronous variants.
189 "srcu_sync": srcu_read_lock(), srcu_read_unlock() and
190 synchronize_srcu().
191
192 "srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
193 synchronize_srcu_expedited().
194 183
195 "sched": preempt_disable(), preempt_enable(), and 184 "sched": preempt_disable(), preempt_enable(), and
196 call_rcu_sched(). 185 call_rcu_sched(), along with expedited,
197 186 synchronous, and polling variants.
198 "sched_sync": preempt_disable(), preempt_enable(), and
199 synchronize_sched().
200 187
201 "sched_expedited": preempt_disable(), preempt_enable(), and 188 "tasks": voluntary context switch and call_rcu_tasks(),
202 synchronize_sched_expedited(). 189 along with expedited and synchronous variants.
203 190
204 Defaults to "rcu". 191 Defaults to "rcu".
205 192
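As a usage sketch for the torture_type parameter above (hypothetical invocation, assuming rcutorture is built as a module), a given flavor can be selected at module-load time with something like "modprobe rcutorture torture_type=srcu"; for a built-in rcutorture the same selection can be made on the kernel command line as rcutorture.torture_type=srcu.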
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 97f17e9decda..ec6998b1b6d0 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -56,14 +56,14 @@ rcuboost:
56 56
57The output of "cat rcu/rcu_preempt/rcudata" looks as follows: 57The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
58 58
59 0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716 59 0!c=30455 g=30456 cnq=1/0:1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
60 1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982 60 1!c=30719 g=30720 cnq=1/0:0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
61 2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458 61 2!c=30150 g=30151 cnq=1/1:1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
62 3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622 62 3 c=31249 g=31250 cnq=1/1:0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
63 4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521 63 4!c=29502 g=29503 cnq=1/0:1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
64 5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698 64 5 c=31201 g=31202 cnq=1/0:1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
65 6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353 65 6!c=30253 g=30254 cnq=1/0:1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
66 7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969 66 7 c=31178 g=31178 cnq=1/0:0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
67 67
68This file has one line per CPU, or eight for this 8-CPU system. 68This file has one line per CPU, or eight for this 8-CPU system.
69The fields are as follows: 69The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
188Kernels compiled with CONFIG_RCU_BOOST=y display the following from 188Kernels compiled with CONFIG_RCU_BOOST=y display the following from
189/debug/rcu/rcu_preempt/rcudata: 189/debug/rcu/rcu_preempt/rcudata:
190 190
191 0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871 191 0!c=12865 g=12866 cnq=1/0:1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
192 1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485 192 1 c=14407 g=14408 cnq=1/0:0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
193 2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490 193 2 c=14407 g=14408 cnq=1/0:0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
194 3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290 194 3 c=14407 g=14408 cnq=1/0:0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
195 4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114 195 4 c=14405 g=14406 cnq=1/0:1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
196 5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722 196 5!c=14168 g=14169 cnq=1/0:0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
197 6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811 197 6 c=14404 g=14405 cnq=1/0:0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
198 7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042 198 7 c=14407 g=14408 cnq=1/0:1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
199 199
200This is similar to the output discussed above, but contains the following 200This is similar to the output discussed above, but contains the following
201additional fields: 201additional fields:
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index adc2184009c5..dc49c6712b17 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -364,7 +364,7 @@ uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
364 }; 364 };
365 DEFINE_SPINLOCK(foo_mutex); 365 DEFINE_SPINLOCK(foo_mutex);
366 366
367 struct foo *gbl_foo; 367 struct foo __rcu *gbl_foo;
368 368
369 /* 369 /*
370 * Create a new struct foo that is the same as the one currently 370 * Create a new struct foo that is the same as the one currently
@@ -386,7 +386,7 @@ uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
386 386
387 new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL); 387 new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
388 spin_lock(&foo_mutex); 388 spin_lock(&foo_mutex);
389 old_fp = gbl_foo; 389 old_fp = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_mutex));
390 *new_fp = *old_fp; 390 *new_fp = *old_fp;
391 new_fp->a = new_a; 391 new_fp->a = new_a;
392 rcu_assign_pointer(gbl_foo, new_fp); 392 rcu_assign_pointer(gbl_foo, new_fp);
@@ -487,7 +487,7 @@ The foo_update_a() function might then be written as follows:
487 487
488 new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL); 488 new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
489 spin_lock(&foo_mutex); 489 spin_lock(&foo_mutex);
490 old_fp = gbl_foo; 490 old_fp = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_mutex));
491 *new_fp = *old_fp; 491 *new_fp = *old_fp;
492 new_fp->a = new_a; 492 new_fp->a = new_a;
493 rcu_assign_pointer(gbl_foo, new_fp); 493 rcu_assign_pointer(gbl_foo, new_fp);
diff --git a/Documentation/arm/OMAP/README b/Documentation/arm/OMAP/README
new file mode 100644
index 000000000000..75645c45d14a
--- /dev/null
+++ b/Documentation/arm/OMAP/README
@@ -0,0 +1,7 @@
1This file contains documentation for running the mainline
2kernel on OMAP SoCs.
3
4KERNEL NEW DEPENDENCIES
5v4.3+ Update is needed for custom .config files to make sure
6 CONFIG_REGULATOR_PBIAS is enabled for MMC1 to work
7 properly.
diff --git a/Documentation/arm/SA1100/Victor b/Documentation/arm/SA1100/Victor
deleted file mode 100644
index 9cff415da5a7..000000000000
--- a/Documentation/arm/SA1100/Victor
+++ /dev/null
@@ -1,16 +0,0 @@
1Victor is known as a "digital talking book player" manufactured by
2VisuAide, Inc. to be used by blind people.
3
4For more information related to Victor, see:
5
6 http://www.humanware.com/en-usa/products
7
8Of course Victor is using Linux as its main operating system.
9The Victor implementation for Linux is maintained by Nicolas Pitre:
10
11 nico@visuaide.com
12 nico@fluxnic.net
13
14For any comments, please feel free to contact me through the above
15addresses.
16
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 4178ebda6e66..546a39048eb0 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -54,7 +54,7 @@ VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space.
54 located here through iotable_init(). 54 located here through iotable_init().
55 VMALLOC_START is based upon the value 55 VMALLOC_START is based upon the value
56 of the high_memory variable, and VMALLOC_END 56 of the high_memory variable, and VMALLOC_END
57 is equal to 0xff000000. 57 is equal to 0xff800000.
58 58
59PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region. 59PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region.
60 This maps the platforms RAM, and typically 60 This maps the platforms RAM, and typically
diff --git a/Documentation/arm/uefi.txt b/Documentation/arm/uefi.txt
index d60030a1b909..7b3fdfe0f7ba 100644
--- a/Documentation/arm/uefi.txt
+++ b/Documentation/arm/uefi.txt
@@ -60,5 +60,3 @@ linux,uefi-mmap-desc-ver | 32-bit | Version of the mmap descriptor format.
60-------------------------------------------------------------------------------- 60--------------------------------------------------------------------------------
61linux,uefi-stub-kern-ver | string | Copy of linux_banner from build. 61linux,uefi-stub-kern-ver | string | Copy of linux_banner from build.
62-------------------------------------------------------------------------------- 62--------------------------------------------------------------------------------
63
64For verbose debug messages, specify 'uefi_debug' on the kernel command line.
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 7d9d3c2286b2..369a4f48eb0d 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -173,13 +173,22 @@ Before jumping into the kernel, the following conditions must be met:
173 the kernel image will be entered must be initialised by software at a 173 the kernel image will be entered must be initialised by software at a
174 higher exception level to prevent execution in an UNKNOWN state. 174 higher exception level to prevent execution in an UNKNOWN state.
175 175
176 For systems with a GICv3 interrupt controller: 176 For systems with a GICv3 interrupt controller to be used in v3 mode:
177 - If EL3 is present: 177 - If EL3 is present:
178	ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.	178	ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
179 ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. 179 ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
180 - If the kernel is entered at EL1: 180 - If the kernel is entered at EL1:
181	ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.	181	ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.
182 ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1. 182 ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
183 - The DT or ACPI tables must describe a GICv3 interrupt controller.
184
185 For systems with a GICv3 interrupt controller to be used in
186 compatibility (v2) mode:
187 - If EL3 is present:
188 ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b0.
189 - If the kernel is entered at EL1:
190 ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b0.
191 - The DT or ACPI tables must describe a GICv2 interrupt controller.
183 192
184The requirements described above for CPU mode, caches, MMUs, architected 193The requirements described above for CPU mode, caches, MMUs, architected
185timers, coherency and system registers apply to all CPUs. All CPUs must 194timers, coherency and system registers apply to all CPUs. All CPUs must
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index b19fc34efdb1..c9d1cacb4395 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -542,6 +542,10 @@ The routines xchg() and cmpxchg() must provide the same exact
542memory-barrier semantics as the atomic and bit operations returning 542memory-barrier semantics as the atomic and bit operations returning
543values. 543values.
544 544
545Note: If someone wants to use xchg(), cmpxchg() and their variants,
546linux/atomic.h should be included rather than asm/cmpxchg.h, unless
547the code is in arch/* and can take care of itself.
548
545Spinlocks and rwlocks have memory barrier expectations as well. 549Spinlocks and rwlocks have memory barrier expectations as well.
546The rule to follow is simple: 550The rule to follow is simple:
547 551
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index 2da059a4790c..cc56021eb60b 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -11,13 +11,14 @@ have PPIs or SGIs.
11Main node required properties: 11Main node required properties:
12 12
13- compatible : should be one of: 13- compatible : should be one of:
14 "arm,gic-400" 14 "arm,arm1176jzf-devchip-gic"
15 "arm,arm11mp-gic"
15 "arm,cortex-a15-gic" 16 "arm,cortex-a15-gic"
16 "arm,cortex-a9-gic"
17 "arm,cortex-a7-gic" 17 "arm,cortex-a7-gic"
18 "arm,arm11mp-gic" 18 "arm,cortex-a9-gic"
19 "arm,gic-400"
20 "arm,pl390"
19 "brcm,brahma-b15-gic" 21 "brcm,brahma-b15-gic"
20 "arm,arm1176jzf-devchip-gic"
21 "qcom,msm-8660-qgic" 22 "qcom,msm-8660-qgic"
22 "qcom,msm-qgic2" 23 "qcom,msm-qgic2"
23- interrupt-controller : Identifies the node as an interrupt controller 24- interrupt-controller : Identifies the node as an interrupt controller
@@ -58,6 +59,21 @@ Optional
58 regions, used when the GIC doesn't have banked registers. The offset is 59 regions, used when the GIC doesn't have banked registers. The offset is
59 cpu-offset * cpu-nr. 60 cpu-offset * cpu-nr.
60 61
 62- clocks : List of phandle and clock-specifier pairs, one for each entry
63 in clock-names.
64- clock-names : List of names for the GIC clock input(s). Valid clock names
65 depend on the GIC variant:
66 "ic_clk" (for "arm,arm11mp-gic")
67 "PERIPHCLKEN" (for "arm,cortex-a15-gic")
68 "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic")
69 "clk" (for "arm,gic-400")
70 "gclk" (for "arm,pl390")
71
72- power-domains : A phandle and PM domain specifier as defined by bindings of
73 the power controller specified by phandle, used when the GIC
74 is part of a Power or Clock Domain.
75
76
61Example: 77Example:
62 78
63 intc: interrupt-controller@fff11000 { 79 intc: interrupt-controller@fff11000 {
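A minimal sketch of how the new optional clock properties above would be added to a GIC node, assuming a GIC-400 and a clock provider labelled clkc (the phandle and clock index are purely illustrative):

	intc: interrupt-controller@fff11000 {
		compatible = "arm,gic-400";
		/* reg, interrupt-controller, #interrupt-cells, etc. as in the example above */
		clocks = <&clkc 26>;		/* illustrative phandle and index */
		clock-names = "clk";		/* valid clock name for "arm,gic-400" */
	};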
diff --git a/Documentation/devicetree/bindings/arm/twd.txt b/Documentation/devicetree/bindings/arm/twd.txt
index 75b8610939fa..383ea19c2bf0 100644
--- a/Documentation/devicetree/bindings/arm/twd.txt
+++ b/Documentation/devicetree/bindings/arm/twd.txt
@@ -19,6 +19,11 @@ interrupts.
19- reg : Specify the base address and the size of the TWD timer 19- reg : Specify the base address and the size of the TWD timer
20 register window. 20 register window.
21 21
22Optional
23
24- always-on : a boolean property. If present, the timer is powered through
25 an always-on power domain, therefore it never loses context.
26
22Example: 27Example:
23 28
24 twd-timer@2c000600 { 29 twd-timer@2c000600 {
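A sketch of the new always-on flag applied to the example node above (whether the flag is appropriate depends on the platform's power domains; the remaining properties are kept as in the example):

	twd-timer@2c000600 {
		/* compatible, reg and interrupts as in the example above */
		always-on;
	};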
diff --git a/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
index 78edb80002c8..78e2a31c58d0 100644
--- a/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
+++ b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
@@ -5,6 +5,8 @@ The follow error types are supported:
5 5
6 memory controller - Memory controller 6 memory controller - Memory controller
7 PMD (L1/L2) - Processor module unit (PMD) L1/L2 cache 7 PMD (L1/L2) - Processor module unit (PMD) L1/L2 cache
8 L3 - L3 cache controller
 9  SoC               - SoC IPs such as Ethernet, SATA, etc.
8 10
9The following section describes the EDAC DT node binding. 11The following section describes the EDAC DT node binding.
10 12
@@ -30,6 +32,17 @@ Required properties for PMD subnode:
30- reg : First resource shall be the PMD resource. 32- reg : First resource shall be the PMD resource.
31- pmd-controller : Instance number of the PMD controller. 33- pmd-controller : Instance number of the PMD controller.
32 34
35Required properties for L3 subnode:
36- compatible : Shall be "apm,xgene-edac-l3" or
37 "apm,xgene-edac-l3-v2".
38- reg : First resource shall be the L3 EDAC resource.
39
40Required properties for SoC subnode:
41- compatible : Shall be "apm,xgene-edac-soc-v1" for revision 1 or
42 "apm,xgene-edac-l3-soc" for general value reporting
43 only.
44- reg : First resource shall be the SoC EDAC resource.
45
33Example: 46Example:
34 csw: csw@7e200000 { 47 csw: csw@7e200000 {
35 compatible = "apm,xgene-csw", "syscon"; 48 compatible = "apm,xgene-csw", "syscon";
@@ -76,4 +89,14 @@ Example:
76 reg = <0x0 0x7c000000 0x0 0x200000>; 89 reg = <0x0 0x7c000000 0x0 0x200000>;
77 pmd-controller = <0>; 90 pmd-controller = <0>;
78 }; 91 };
92
93 edacl3@7e600000 {
94 compatible = "apm,xgene-edac-l3";
95 reg = <0x0 0x7e600000 0x0 0x1000>;
96 };
97
98 edacsoc@7e930000 {
99 compatible = "apm,xgene-edac-soc-v1";
100 reg = <0x0 0x7e930000 0x0 0x1000>;
101 };
79 }; 102 };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-msm.txt b/Documentation/devicetree/bindings/gpio/gpio-msm.txt
deleted file mode 100644
index ac20e68a004e..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-msm.txt
+++ /dev/null
@@ -1,26 +0,0 @@
1MSM GPIO controller bindings
2
3Required properties:
4- compatible:
5 - "qcom,msm-gpio" for MSM controllers
6- #gpio-cells : Should be two.
7 - first cell is the pin number
8 - second cell is used to specify optional parameters (unused)
9- gpio-controller : Marks the device node as a GPIO controller.
10- #interrupt-cells : Should be 2.
11- interrupt-controller: Mark the device node as an interrupt controller
12- interrupts : Specify the TLMM summary interrupt number
13- ngpio : Specify the number of MSM GPIOs
14
15Example:
16
17 msmgpio: gpio@fd510000 {
18 compatible = "qcom,msm-gpio";
19 gpio-controller;
20 #gpio-cells = <2>;
21 interrupt-controller;
22 #interrupt-cells = <2>;
23 reg = <0xfd510000 0x4000>;
24 interrupts = <0 208 0>;
25 ngpio = <150>;
26 };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index b9a42f294dd0..13df9933f4cd 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
@@ -24,6 +24,7 @@ Required properties:
24 ti,tca6408 24 ti,tca6408
25 ti,tca6416 25 ti,tca6416
26 ti,tca6424 26 ti,tca6424
27 ti,tca9539
27 exar,xra1202 28 exar,xra1202
28 29
29Example: 30Example:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
index db4c6a663c03..7b542657f259 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
@@ -12,6 +12,13 @@ Required properties:
12- interrupts : Interrupt specifier (see interrupt bindings for 12- interrupts : Interrupt specifier (see interrupt bindings for
13 details) 13 details)
14- interrupt-parent : Must be core interrupt controller 14- interrupt-parent : Must be core interrupt controller
15- interrupt-controller : Marks the device node as an interrupt controller.
16- #interrupt-cells : Should be 2. The first cell is the GPIO number.
17 The second cell bits[3:0] is used to specify trigger type and level flags:
18 1 = low-to-high edge triggered.
19 2 = high-to-low edge triggered.
20 4 = active high level-sensitive.
21 8 = active low level-sensitive.
15- reg : Address and length of the register set for the device 22- reg : Address and length of the register set for the device
16 23
17Example: 24Example:
@@ -22,5 +29,7 @@ Example:
22 gpio-controller; 29 gpio-controller;
23 interrupt-parent = <&intc>; 30 interrupt-parent = <&intc>;
24 interrupts = <0 20 4>; 31 interrupts = <0 20 4>;
32 interrupt-controller;
33 #interrupt-cells = <2>;
25 reg = <0xe000a000 0x1000>; 34 reg = <0xe000a000 0x1000>;
26 }; 35 };
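A consumer sketch for the interrupt flag encoding above; the controller label (&gpio0) and the client node are invented for illustration, and the value 2 selects high-to-low edge triggering per the list above:

	some-device@0 {
		interrupt-parent = <&gpio0>;
		interrupts = <12 2>;	/* GPIO pin 12, high-to-low edge triggered */
	};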
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 82d40e2505f6..069cdf6f9dac 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -54,9 +54,13 @@ only uses one.
54 54
55gpio-specifier may encode: bank, pin position inside the bank, 55gpio-specifier may encode: bank, pin position inside the bank,
56whether pin is open-drain and whether pin is logically inverted. 56whether pin is open-drain and whether pin is logically inverted.
57
57Exact meaning of each specifier cell is controller specific, and must 58Exact meaning of each specifier cell is controller specific, and must
58be documented in the device tree binding for the device. Use the macros 59be documented in the device tree binding for the device.
59defined in include/dt-bindings/gpio/gpio.h whenever possible: 60
61Most controllers, however, specify a generic flag bitfield
62in the last cell, so for these, use the macros defined in
63include/dt-bindings/gpio/gpio.h whenever possible:
60 64
61Example of a node using GPIOs: 65Example of a node using GPIOs:
62 66
@@ -67,6 +71,15 @@ Example of a node using GPIOs:
67GPIO_ACTIVE_HIGH is 0, so in this example gpio-specifier is "18 0" and encodes 71GPIO_ACTIVE_HIGH is 0, so in this example gpio-specifier is "18 0" and encodes
68GPIO pin number, and GPIO flags as accepted by the "qe_pio_e" gpio-controller. 72GPIO pin number, and GPIO flags as accepted by the "qe_pio_e" gpio-controller.
69 73
74Optional standard bitfield specifiers for the last cell:
75
76- Bit 0: 0 means active high, 1 means active low
77- Bit 1: 1 means single-ended wiring, see:
78 https://en.wikipedia.org/wiki/Single-ended_triode
79 When used with active-low, this means open drain/collector, see:
80 https://en.wikipedia.org/wiki/Open_collector
81 When used with active-high, this means open source/emitter
82
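Concretely, reusing the "qe_pio_e" controller from the example above, the bitfield can be spelled out numerically (the macros in include/dt-bindings/gpio/gpio.h express the same bits symbolically):

	gpios = <&qe_pio_e 18 0>;	/* bit 0 clear: active high */
	gpios = <&qe_pio_e 18 1>;	/* bit 0 set: active low */
	gpios = <&qe_pio_e 18 3>;	/* bits 0 and 1 set: active low, single-ended (open drain) */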
701.1) GPIO specifier best practices 831.1) GPIO specifier best practices
71---------------------------------- 84----------------------------------
72 85
@@ -118,6 +131,30 @@ Every GPIO controller node must contain both an empty "gpio-controller"
118property, and a #gpio-cells integer property, which indicates the number of 131property, and a #gpio-cells integer property, which indicates the number of
119cells in a gpio-specifier. 132cells in a gpio-specifier.
120 133
134Optionally, a GPIO controller may have a "ngpios" property. This property
135indicates the number of in-use slots out of the available slots for GPIOs. The
136typical example is something like this: the hardware register is 32 bits
137wide, but only 18 of the bits have a physical counterpart. The driver is
138generally written so that all 32 bits can be used, but the IP block is reused
139in a lot of designs, some using all 32 bits, some using 18 and some using
14012. In this case, setting "ngpios = <18>;" informs the driver that only the
141first 18 GPIOs, at local offset 0 .. 17, are in use.
142
143If these GPIOs do not happen to be the first N GPIOs at offset 0...N-1, an
144additional bitmask is needed to specify which GPIOs are actually in use,
145and which are dummies. The bindings for this case have not yet been
146specified, but should be specified if/when such hardware appears.
147
148Example:
149
150gpio-controller@00000000 {
151 compatible = "foo";
152 reg = <0x00000000 0x1000>;
153 gpio-controller;
154 #gpio-cells = <2>;
155 ngpios = <18>;
156};
157
121The GPIO chip may contain GPIO hog definitions. GPIO hogging is a mechanism 158The GPIO chip may contain GPIO hog definitions. GPIO hogging is a mechanism
122providing automatic GPIO request and configuration as part of the 159providing automatic GPIO request and configuration as part of the
123gpio-controller's driver probe function. 160gpio-controller's driver probe function.
diff --git a/Documentation/devicetree/bindings/gpio/netxbig-gpio-ext.txt b/Documentation/devicetree/bindings/gpio/netxbig-gpio-ext.txt
new file mode 100644
index 000000000000..50ec2e690701
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/netxbig-gpio-ext.txt
@@ -0,0 +1,22 @@
1Binding for the GPIO extension bus found on some LaCie/Seagate boards
2(Example: 2Big/5Big Network v2, 2Big NAS).
3
4Required properties:
5- compatible: "lacie,netxbig-gpio-ext".
6- addr-gpios: GPIOs representing the address register (LSB -> MSB).
7- data-gpios: GPIOs representing the data register (LSB -> MSB).
 8- enable-gpio: latches the new configuration (address, data) on rising edge.
9
10Example:
11
12netxbig_gpio_ext: netxbig-gpio-ext {
13 compatible = "lacie,netxbig-gpio-ext";
14
15 addr-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH
16 &gpio1 16 GPIO_ACTIVE_HIGH
17 &gpio1 17 GPIO_ACTIVE_HIGH>;
18 data-gpios = <&gpio1 12 GPIO_ACTIVE_HIGH
19 &gpio1 13 GPIO_ACTIVE_HIGH
20 &gpio1 14 GPIO_ACTIVE_HIGH>;
21 enable-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
22};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index 63633bdea7e4..ae5054c27c99 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -10,6 +10,7 @@ Required properties:
10 - "renesas,irqc-r8a7792" (R-Car V2H) 10 - "renesas,irqc-r8a7792" (R-Car V2H)
11 - "renesas,irqc-r8a7793" (R-Car M2-N) 11 - "renesas,irqc-r8a7793" (R-Car M2-N)
12 - "renesas,irqc-r8a7794" (R-Car E2) 12 - "renesas,irqc-r8a7794" (R-Car E2)
13 - "renesas,intc-ex-r8a7795" (R-Car H3)
13- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in 14- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
14 interrupts.txt in this directory 15 interrupts.txt in this directory
15- clocks: Must contain a reference to the functional clock. 16- clocks: Must contain a reference to the functional clock.
diff --git a/Documentation/devicetree/bindings/leds/leds-aat1290.txt b/Documentation/devicetree/bindings/leds/leds-aat1290.txt
index c05ed91a4e42..85c0c58617f6 100644
--- a/Documentation/devicetree/bindings/leds/leds-aat1290.txt
+++ b/Documentation/devicetree/bindings/leds/leds-aat1290.txt
@@ -27,9 +27,9 @@ Required properties of the LED child node:
27- flash-max-microamp : see Documentation/devicetree/bindings/leds/common.txt 27- flash-max-microamp : see Documentation/devicetree/bindings/leds/common.txt
28 Maximum flash LED supply current can be calculated using 28 Maximum flash LED supply current can be calculated using
29 following formula: I = 1A * 162kohm / Rset. 29 following formula: I = 1A * 162kohm / Rset.
30- flash-timeout-us : see Documentation/devicetree/bindings/leds/common.txt 30- flash-max-timeout-us : see Documentation/devicetree/bindings/leds/common.txt
31 Maximum flash timeout can be calculated using following 31 Maximum flash timeout can be calculated using following
32 formula: T = 8.82 * 10^9 * Ct. 32 formula: T = 8.82 * 10^9 * Ct.
33 33
34Optional properties of the LED child node: 34Optional properties of the LED child node:
35- label : see Documentation/devicetree/bindings/leds/common.txt 35- label : see Documentation/devicetree/bindings/leds/common.txt
@@ -54,7 +54,7 @@ aat1290 {
54 label = "aat1290-flash"; 54 label = "aat1290-flash";
55 led-max-microamp = <520833>; 55 led-max-microamp = <520833>;
56 flash-max-microamp = <1012500>; 56 flash-max-microamp = <1012500>;
57 flash-timeout-us = <1940000>; 57 flash-max-timeout-us = <1940000>;
58 }; 58 };
59}; 59};
60 60
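As a worked check of the formulas above against the example values (the component values are inferred, not taken from a datasheet): with Rset = 160 kohm, I = 1 A * 162 kohm / 160 kohm = 1.0125 A, matching flash-max-microamp = <1012500>; the flash-max-timeout-us = <1940000> (1.94 s) would similarly correspond to a Ct of roughly 220 pF if T in the timeout formula is taken in seconds (the formula as written does not state its units).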
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
index f9e36adc0ebf..3f48c1eaf085 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
@@ -29,6 +29,14 @@ Required properties:
29Optional properties: 29Optional properties:
30 - brcm,serial-leds : Boolean, enables Serial LEDs. 30 - brcm,serial-leds : Boolean, enables Serial LEDs.
31 Default : false 31 Default : false
32 - brcm,serial-mux : Boolean, enables Serial LEDs multiplexing.
33 Default : false
34 - brcm,serial-clk-low : Boolean, makes clock signal active low.
35 Default : false
36 - brcm,serial-dat-low : Boolean, makes data signal active low.
37 Default : false
38 - brcm,serial-shift-inv : Boolean, inverts Serial LEDs shift direction.
39 Default : false
32 40
33Each LED is represented as a sub-node of the brcm,bcm6328-leds device. 41Each LED is represented as a sub-node of the brcm,bcm6328-leds device.
34 42
@@ -110,6 +118,8 @@ Scenario 2 : BCM63268 with Serial/GPHY0 LEDs
110 #size-cells = <0>; 118 #size-cells = <0>;
111 reg = <0x10001900 0x24>; 119 reg = <0x10001900 0x24>;
112 brcm,serial-leds; 120 brcm,serial-leds;
121 brcm,serial-dat-low;
122 brcm,serial-shift-inv;
113 123
114 gphy0_spd0@0 { 124 gphy0_spd0@0 {
115 reg = <0>; 125 reg = <0>;
diff --git a/Documentation/devicetree/bindings/leds/leds-netxbig.txt b/Documentation/devicetree/bindings/leds/leds-netxbig.txt
new file mode 100644
index 000000000000..5ef92a26d768
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-netxbig.txt
@@ -0,0 +1,92 @@
1Binding for the CPLD LEDs (GPIO extension bus) found on some LaCie/Seagate
2boards (Example: 2Big/5Big Network v2, 2Big NAS).
3
4Required properties:
5- compatible: "lacie,netxbig-leds".
6- gpio-ext: Phandle for the gpio-ext bus.
7
8Optional properties:
9- timers: Timer array. Each timer entry is represented by three integers:
10 Mode (gpio-ext bus), delay_on and delay_off.
11
12Each LED is represented as a sub-node of the netxbig-leds device.
13
14Required sub-node properties:
15- mode-addr: Mode register address on gpio-ext bus.
16- mode-val: Mode to value mapping. Each entry is represented by two integers:
17 A mode and the corresponding value on the gpio-ext bus.
18- bright-addr: Brightness register address on gpio-ext bus.
19- max-brightness: Maximum brightness value.
20
21Optional sub-node properties:
22- label: Name for this LED. If omitted, the label is taken from the node name.
23- linux,default-trigger: Trigger assigned to the LED.
24
25Example:
26
27netxbig-leds {
28 compatible = "lacie,netxbig-leds";
29
30 gpio-ext = &gpio_ext;
31
32 timers = <NETXBIG_LED_TIMER1 500 500
33 NETXBIG_LED_TIMER2 500 1000>;
34
35 blue-power {
36 label = "netxbig:blue:power";
37 mode-addr = <0>;
38 mode-val = <NETXBIG_LED_OFF 0
39 NETXBIG_LED_ON 1
40 NETXBIG_LED_TIMER1 3
41 NETXBIG_LED_TIMER2 7>;
42 bright-addr = <1>;
43 max-brightness = <7>;
44 };
45 red-power {
46 label = "netxbig:red:power";
47 mode-addr = <0>;
48 mode-val = <NETXBIG_LED_OFF 0
49 NETXBIG_LED_ON 2
50 NETXBIG_LED_TIMER1 4>;
51 bright-addr = <1>;
52 max-brightness = <7>;
53 };
54 blue-sata0 {
55 label = "netxbig:blue:sata0";
56 mode-addr = <3>;
57 mode-val = <NETXBIG_LED_OFF 0
58 NETXBIG_LED_ON 7
59 NETXBIG_LED_SATA 1
60 NETXBIG_LED_TIMER1 3>;
61 bright-addr = <2>;
62 max-brightness = <7>;
63 };
64 red-sata0 {
65 label = "netxbig:red:sata0";
66 mode-addr = <3>;
67 mode-val = <NETXBIG_LED_OFF 0
68 NETXBIG_LED_ON 2
69 NETXBIG_LED_TIMER1 4>;
70 bright-addr = <2>;
71 max-brightness = <7>;
72 };
73 blue-sata1 {
74 label = "netxbig:blue:sata1";
75 mode-addr = <4>;
76 mode-val = <NETXBIG_LED_OFF 0
77 NETXBIG_LED_ON 7
78 NETXBIG_LED_SATA 1
79 NETXBIG_LED_TIMER1 3>;
80 bright-addr = <2>;
81 max-brightness = <7>;
82 };
83 red-sata1 {
84 label = "netxbig:red:sata1";
85 mode-addr = <4>;
86 mode-val = <NETXBIG_LED_OFF 0
87 NETXBIG_LED_ON 2
88 NETXBIG_LED_TIMER1 4>;
89 bright-addr = <2>;
90 max-brightness = <7>;
91 };
92};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index b7943f3f9995..dedfb02c744a 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -22,6 +22,8 @@ Optional properties:
22 - voltage-ranges : two cells are required, first cell specifies minimum 22 - voltage-ranges : two cells are required, first cell specifies minimum
23 slot voltage (mV), second cell specifies maximum slot voltage (mV). 23 slot voltage (mV), second cell specifies maximum slot voltage (mV).
24 Several ranges could be specified. 24 Several ranges could be specified.
25  - little-endian : If the host controller is in little-endian mode, specify
26 this property. The default endian mode is big-endian.
25 27
26Example: 28Example:
27 29
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index 0384fc3f64e8..f693baf87264 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -37,6 +37,7 @@ Optional properties:
37- sd-uhs-sdr104: SD UHS SDR104 speed is supported 37- sd-uhs-sdr104: SD UHS SDR104 speed is supported
38- sd-uhs-ddr50: SD UHS DDR50 speed is supported 38- sd-uhs-ddr50: SD UHS DDR50 speed is supported
39- cap-power-off-card: powering off the card is safe 39- cap-power-off-card: powering off the card is safe
40- cap-mmc-hw-reset: eMMC hardware reset is supported
40- cap-sdio-irq: enable SDIO IRQ signalling on this interface 41- cap-sdio-irq: enable SDIO IRQ signalling on this interface
41- full-pwr-cycle: full power cycle of the card is supported 42- full-pwr-cycle: full power cycle of the card is supported
42- mmc-ddr-1_8v: eMMC high-speed DDR mode(1.8V I/O) is supported 43- mmc-ddr-1_8v: eMMC high-speed DDR mode(1.8V I/O) is supported
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index a1adfa495ad3..0120c7f1109c 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -17,6 +17,11 @@ Required properties:
17- vmmc-supply: power to the Core 17- vmmc-supply: power to the Core
18- vqmmc-supply: power to the IO 18- vqmmc-supply: power to the IO
19 19
20Optional properties:
21- assigned-clocks: PLL of the source clock
22- assigned-clock-parents: parent of the source clock, used in HS400 mode to get a 400MHz source clock
23- hs400-ds-delay: HS400 DS delay setting
24
20Examples: 25Examples:
21mmc0: mmc@11230000 { 26mmc0: mmc@11230000 {
22 compatible = "mediatek,mt8173-mmc", "mediatek,mt8135-mmc"; 27 compatible = "mediatek,mt8173-mmc", "mediatek,mt8135-mmc";
@@ -24,9 +29,13 @@ mmc0: mmc@11230000 {
24 interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_LOW>; 29 interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_LOW>;
25 vmmc-supply = <&mt6397_vemc_3v3_reg>; 30 vmmc-supply = <&mt6397_vemc_3v3_reg>;
26 vqmmc-supply = <&mt6397_vio18_reg>; 31 vqmmc-supply = <&mt6397_vio18_reg>;
27 clocks = <&pericfg CLK_PERI_MSDC30_0>, <&topckgen CLK_TOP_MSDC50_0_H_SEL>; 32 clocks = <&pericfg CLK_PERI_MSDC30_0>,
33 <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
28 clock-names = "source", "hclk"; 34 clock-names = "source", "hclk";
29 pinctrl-names = "default", "state_uhs"; 35 pinctrl-names = "default", "state_uhs";
30 pinctrl-0 = <&mmc0_pins_default>; 36 pinctrl-0 = <&mmc0_pins_default>;
31 pinctrl-1 = <&mmc0_pins_uhs>; 37 pinctrl-1 = <&mmc0_pins_uhs>;
38 assigned-clocks = <&topckgen CLK_TOP_MSDC50_0_SEL>;
39 assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_D2>;
40 hs400-ds-delay = <0x14015>;
32}; 41};
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
index d38942f6c5ae..cae29eb5733d 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
+++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
@@ -6,11 +6,12 @@ and the properties used by the MMCIF device.
6 6
7Required properties: 7Required properties:
8 8
9- compatible: must contain one of the following 9- compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
10 fallback. Examples with <soctype> are:
10 - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs 11 - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
11 - "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs 12 - "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
12 - "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs 13 - "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
13 - "renesas,sh-mmcif" for the generic MMCIF 14 - "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
14 15
15- clocks: reference to the functional clock 16- clocks: reference to the functional clock
16 17
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index c327c2d6f23d..3dc13b68fc3f 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -14,6 +14,19 @@ Required Properties:
14 before RK3288 14 before RK3288
15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288 15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
16 16
17Optional Properties:
18* clocks: from common clock binding: if ciu_drive and ciu_sample are
19 specified in clock-names, should contain handles to these clocks.
20
21* clock-names: Apart from the clock-names described in synopsys-dw-mshc.txt
22 two more clocks "ciu-drive" and "ciu-sample" are supported. They are used
23  to control the clock phases; "ciu-sample" is required for tuning high-
24 speed modes.
25
26* rockchip,default-sample-phase: The default phase to set ciu_sample at
27  probing, at low speeds, or in cases where all phases work at tuning time.
28  If not specified, 0 deg will be used.
29
17Example: 30Example:
18 31
19 rkdwmmc0@12200000 { 32 rkdwmmc0@12200000 {
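A sketch of the optional clock-phase properties described above; the clock provider phandle and clock identifiers are placeholders, while the "biu" and "ciu" names come from synopsys-dw-mshc.txt:

	rkdwmmc0@12200000 {
		/* other properties as in the example above */
		clocks = <&cru SCLK_MMC>, <&cru HCLK_MMC>,
			 <&cru SCLK_MMC_DRIVE>, <&cru SCLK_MMC_SAMPLE>;
		clock-names = "ciu", "biu", "ciu-drive", "ciu-sample";
		rockchip,default-sample-phase = <90>;
	};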
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 346c6095a615..8636f5ae97e5 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -75,6 +75,12 @@ Optional properties:
75* vmmc-supply: The phandle to the regulator to use for vmmc. If this is 75* vmmc-supply: The phandle to the regulator to use for vmmc. If this is
76 specified we'll defer probe until we can find this regulator. 76 specified we'll defer probe until we can find this regulator.
77 77
78* dmas: List of DMA specifiers with the controller specific format as described
79 in the generic DMA client binding. Refer to dma.txt for details.
80
81* dma-names: request names for generic DMA client binding. Must be "rx-tx".
82 Refer to dma.txt for details.
83
78Aliases: 84Aliases:
79 85
80- All the MSHC controller nodes should be represented in the aliases node using 86- All the MSHC controller nodes should be represented in the aliases node using
@@ -95,6 +101,23 @@ board specific portions as listed below.
95 #size-cells = <0>; 101 #size-cells = <0>;
96 }; 102 };
97 103
104[board specific internal DMA resources]
105
106 dwmmc0@12200000 {
107 clock-frequency = <400000000>;
108 clock-freq-min-max = <400000 200000000>;
109 num-slots = <1>;
110 broken-cd;
111 fifo-depth = <0x80>;
112 card-detect-delay = <200>;
113 vmmc-supply = <&buck8>;
114 bus-width = <8>;
115 cap-mmc-highspeed;
116 cap-sd-highspeed;
117 };
118
119[board specific generic DMA request binding]
120
98 dwmmc0@12200000 { 121 dwmmc0@12200000 {
99 clock-frequency = <400000000>; 122 clock-frequency = <400000000>;
100 clock-freq-min-max = <400000 200000000>; 123 clock-freq-min-max = <400000 200000000>;
@@ -106,4 +129,6 @@ board specific portions as listed below.
106 bus-width = <8>; 129 bus-width = <8>;
107 cap-mmc-highspeed; 130 cap-mmc-highspeed;
108 cap-sd-highspeed; 131 cap-sd-highspeed;
132 dmas = <&pdma 12>;
133 dma-names = "rx-tx";
109 }; 134 };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index a9df21aaa154..a2cae4eb4a60 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -39,6 +39,7 @@ Required properties:
39Optional properties: 39Optional properties:
40- dual_emac_res_vlan : Specifies VID to be used to segregate the ports 40- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
41- mac-address : See ethernet.txt file in the same directory 41- mac-address : See ethernet.txt file in the same directory
42- phy-handle : See ethernet.txt file in the same directory
42 43
43Note: "ti,hwmods" field is used to fetch the base address and irq 44Note: "ti,hwmods" field is used to fetch the base address and irq
44resources from TI, omap hwmod data base during device registration. 45resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
new file mode 100644
index 000000000000..974edd5c85cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
@@ -0,0 +1,24 @@
1SMSC LAN87xx Ethernet PHY
2
3Some boards require special tuning values. Configure them
4through an Ethernet OF device node.
5
6Optional properties:
7
8- smsc,disable-energy-detect:
9 If set, do not enable energy detect mode for the SMSC phy.
10 default: enable energy detect mode
11
12Examples:
13SMSC PHY with disabled energy detect mode on an AM335x-based board.
14&davinci_mdio {
15 pinctrl-names = "default", "sleep";
16 pinctrl-0 = <&davinci_mdio_default>;
17 pinctrl-1 = <&davinci_mdio_sleep>;
18 status = "okay";
19
20 ethernetphy0: ethernet-phy@0 {
21 reg = <0>;
22 smsc,disable-energy-detect;
23 };
24};
diff --git a/Documentation/devicetree/bindings/pci/pci-msi.txt b/Documentation/devicetree/bindings/pci/pci-msi.txt
new file mode 100644
index 000000000000..9b3cc817d181
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-msi.txt
@@ -0,0 +1,220 @@
1This document describes the generic device tree binding for describing the
2relationship between PCI devices and MSI controllers.
3
4Each PCI device under a root complex is uniquely identified by its Requester ID
5(AKA RID). A Requester ID is a triplet of a Bus number, Device number, and
6Function number.
7
8For the purpose of this document, when treated as a numeric value, a RID is
9formatted such that:
10
11* Bits [15:8] are the Bus number.
12* Bits [7:3] are the Device number.
13* Bits [2:0] are the Function number.
14* Any other bits required for padding must be zero.
15
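For example, a device at bus 0x08, device 0x01, function 0x2 (values chosen purely for illustration) has a RID of (0x08 << 8) | (0x01 << 3) | 0x2 = 0x080a.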
16MSIs may be distinguished in part through the use of sideband data accompanying
17writes. In the case of PCI devices, this sideband data may be derived from the
18Requester ID. A mechanism is required to associate a device with both the MSI
19controllers it can address, and the sideband data that will be associated with
20its writes to those controllers.
21
22For generic MSI bindings, see
23Documentation/devicetree/bindings/interrupt-controller/msi.txt.
24
25
26PCI root complex
27================
28
29Optional properties
30-------------------
31
32- msi-map: Maps a Requester ID to an MSI controller and associated
33 msi-specifier data. The property is an arbitrary number of tuples of
34 (rid-base,msi-controller,msi-base,length), where:
35
36 * rid-base is a single cell describing the first RID matched by the entry.
37
38 * msi-controller is a single phandle to an MSI controller
39
40 * msi-base is an msi-specifier describing the msi-specifier produced for the
41 first RID matched by the entry.
42
43 * length is a single cell describing how many consecutive RIDs are matched
44 following the rid-base.
45
46 Any RID r in the interval [rid-base, rid-base + length) is associated with
47 the listed msi-controller, with the msi-specifier (r - rid-base + msi-base).
48
49- msi-map-mask: A mask to be applied to each Requester ID prior to being mapped
50 to an msi-specifier per the msi-map property.
51
52- msi-parent: Describes the MSI parent of the root complex itself. Where
53 the root complex and MSI controller do not pass sideband data with MSI
54 writes, this property may be used to describe the MSI controller(s)
55 used by PCI devices under the root complex, if defined as such in the
56 binding for the root complex.
57
58
59Example (1)
60===========
61
62/ {
63 #address-cells = <1>;
64 #size-cells = <1>;
65
66 msi: msi-controller@a {
67 reg = <0xa 0x1>;
68 compatible = "vendor,some-controller";
69 msi-controller;
70 #msi-cells = <1>;
71 };
72
73 pci: pci@f {
74 reg = <0xf 0x1>;
75 compatible = "vendor,pcie-root-complex";
76 device_type = "pci";
77
78 /*
79 * The sideband data provided to the MSI controller is
80 * the RID, identity-mapped.
81 */
82		msi-map = <0x0 &msi 0x0 0x10000>;
83 };
84};
85
86
87Example (2)
88===========
89
90/ {
91 #address-cells = <1>;
92 #size-cells = <1>;
93
94 msi: msi-controller@a {
95 reg = <0xa 0x1>;
96 compatible = "vendor,some-controller";
97 msi-controller;
98 #msi-cells = <1>;
99 };
100
101 pci: pci@f {
102 reg = <0xf 0x1>;
103 compatible = "vendor,pcie-root-complex";
104 device_type = "pci";
105
106 /*
107 * The sideband data provided to the MSI controller is
108 * the RID, masked to only the device and function bits.
109 */
110		msi-map = <0x0 &msi 0x0 0x100>;
111		msi-map-mask = <0xff>;
112 };
113};
114
115
116Example (3)
117===========
118
119/ {
120 #address-cells = <1>;
121 #size-cells = <1>;
122
123 msi: msi-controller@a {
124 reg = <0xa 0x1>;
125 compatible = "vendor,some-controller";
126 msi-controller;
127 #msi-cells = <1>;
128 };
129
130 pci: pci@f {
131 reg = <0xf 0x1>;
132 compatible = "vendor,pcie-root-complex";
133 device_type = "pci";
134
135 /*
136 * The sideband data provided to the MSI controller is
137 * the RID, but the high bit of the bus number is
138 * ignored.
139 */
140 msi-map = <0x0000 &msi 0x0000 0x8000>,
141 <0x8000 &msi 0x0000 0x8000>;
142 };
143};
144
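Applying the msi-map rule to Example (3): a device with RID 0x8042 matches the second entry (rid-base 0x8000, length 0x8000), so the msi-specifier presented to the controller is 0x8042 - 0x8000 + 0x0000 = 0x0042; a device with RID 0x0042 gets the same specifier from the first entry, which is how the high bit of the bus number ends up being ignored.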
145
146Example (4)
147===========
148
149/ {
150 #address-cells = <1>;
151 #size-cells = <1>;
152
153 msi: msi-controller@a {
154 reg = <0xa 0x1>;
155 compatible = "vendor,some-controller";
156 msi-controller;
157 #msi-cells = <1>;
158 };
159
160 pci: pci@f {
161 reg = <0xf 0x1>;
162 compatible = "vendor,pcie-root-complex";
163 device_type = "pci";
164
165 /*
166 * The sideband data provided to the MSI controller is
167 * the RID, but the high bit of the bus number is
168 * negated.
169 */
170 msi-map = <0x0000 &msi 0x8000 0x8000>,
171 <0x8000 &msi 0x0000 0x8000>;
172 };
173};
174
175
176Example (5)
177===========
178
179/ {
180 #address-cells = <1>;
181 #size-cells = <1>;
182
183 msi_a: msi-controller@a {
184 reg = <0xa 0x1>;
185 compatible = "vendor,some-controller";
186 msi-controller;
187 #msi-cells = <1>;
188 };
189
190 msi_b: msi-controller@b {
191 reg = <0xb 0x1>;
192 compatible = "vendor,some-controller";
193 msi-controller;
194 #msi-cells = <1>;
195 };
196
197 msi_c: msi-controller@c {
198 reg = <0xc 0x1>;
199 compatible = "vendor,some-controller";
200 msi-controller;
201 #msi-cells = <1>;
202 };
203
204 pci: pci@c {
205 reg = <0xf 0x1>;
206 compatible = "vendor,pcie-root-complex";
207 device_type = "pci";
208
209 /*
210 * The sideband data provided to MSI controller a is the
211 * RID, but the high bit of the bus number is negated.
212 * The sideband data provided to MSI controller b is the
213 * RID, identity-mapped.
214 * MSI controller c is not addressable.
215 */
216 msi-map = <0x0000 &msi_a 0x8000 0x08000>,
217 <0x8000 &msi_a 0x0000 0x08000>,
218 <0x0000 &msi_b 0x0000 0x10000>;
219 };
220};
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 3c821cda1ad0..b321b26780dc 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -17,6 +17,7 @@ Required properties:
17 "allwinner,sun8i-a23-pinctrl" 17 "allwinner,sun8i-a23-pinctrl"
18 "allwinner,sun8i-a23-r-pinctrl" 18 "allwinner,sun8i-a23-r-pinctrl"
19 "allwinner,sun8i-a33-pinctrl" 19 "allwinner,sun8i-a33-pinctrl"
20 "allwinner,sun8i-a83t-pinctrl"
20 21
21- reg: Should contain the register physical address and length for the 22- reg: Should contain the register physical address and length for the
22 pin controller. 23 pin controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
new file mode 100644
index 000000000000..61ac75706cc9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
@@ -0,0 +1,90 @@
1* Atmel PIO4 Controller
2
3The Atmel PIO4 controller is used to select the function of a pin and to
4configure it.
5
6Required properties:
7- compatible: "atmel,sama5d2-pinctrl".
8- reg: base address and length of the PIO controller.
9- interrupts: interrupt outputs from the controller, one for each bank.
10- interrupt-controller: mark the device node as an interrupt controller.
11- #interrupt-cells: should be two.
12- gpio-controller: mark the device node as a gpio controller.
13- #gpio-cells: should be two.
14
15Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
16a general description of GPIO and interrupt bindings.
17
18Please refer to pinctrl-bindings.txt in this directory for details of the
19common pinctrl bindings used by client devices.
20
21Subnode format
22Each node (or subnode) will list the pins it needs and how to configure these
23pins.
24
25 node {
26 pinmux = <PIN_NUMBER_PINMUX>;
27 GENERIC_PINCONFIG;
28 };
29
30Required properties:
31- pinmux: integer array. Each integer represents a pin number plus mux and
32ioset settings. Use the macros from boot/dts/<soc>-pinfunc.h file to get the
33right representation of the pin.
34
35Optional properties:
36- GENERIC_PINCONFIG: generic pinconfig options to use, bias-disable,
37bias-pull-down, bias-pull-up, drive-open-drain, input-schmitt-enable,
38input-debounce, output-low, output-high.
39
40Example:
41
42#include <sama5d2-pinfunc.h>
43
44...
45{
46 pioA: pinctrl@fc038000 {
47 compatible = "atmel,sama5d2-pinctrl";
48 reg = <0xfc038000 0x600>;
49 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 7>,
50 <68 IRQ_TYPE_LEVEL_HIGH 7>,
51 <69 IRQ_TYPE_LEVEL_HIGH 7>,
52 <70 IRQ_TYPE_LEVEL_HIGH 7>;
53 interrupt-controller;
54 #interrupt-cells = <2>;
55 gpio-controller;
56 #gpio-cells = <2>;
57 clocks = <&pioA_clk>;
58
59 pinctrl_i2c0_default: i2c0_default {
60 pinmux = <PIN_PD21__TWD0>,
61 <PIN_PD22__TWCK0>;
62 bias-disable;
63 };
64
65 pinctrl_led_gpio_default: led_gpio_default {
66 pinmux = <PIN_PB0>,
67 <PIN_PB5>;
68 bias-pull-up;
69 };
70
71 pinctrl_sdmmc1_default: sdmmc1_default {
72 cmd_data {
73 pinmux = <PIN_PA28__SDMMC1_CMD>,
74 <PIN_PA18__SDMMC1_DAT0>,
75 <PIN_PA19__SDMMC1_DAT1>,
76 <PIN_PA20__SDMMC1_DAT2>,
77 <PIN_PA21__SDMMC1_DAT3>;
78 bias-pull-up;
79 };
80
81 ck_cd {
82 pinmux = <PIN_PA22__SDMMC1_CK>,
83 <PIN_PA30__SDMMC1_CD>;
84 bias-disable;
85 };
86 };
87 ...
88 };
89};
90...
diff --git a/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
index a8bb5e26019c..f8fa28ce163e 100644
--- a/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
@@ -20,7 +20,10 @@ Required properties:
20 "marvell,berlin2cd-soc-pinctrl", 20 "marvell,berlin2cd-soc-pinctrl",
21 "marvell,berlin2cd-system-pinctrl", 21 "marvell,berlin2cd-system-pinctrl",
22 "marvell,berlin2q-soc-pinctrl", 22 "marvell,berlin2q-soc-pinctrl",
23 "marvell,berlin2q-system-pinctrl" 23 "marvell,berlin2q-system-pinctrl",
24 "marvell,berlin4ct-avio-pinctrl",
25 "marvell,berlin4ct-soc-pinctrl",
26 "marvell,berlin4ct-system-pinctrl"
24 27
25Required subnode-properties: 28Required subnode-properties:
26- groups: a list of strings describing the group names. 29- groups: a list of strings describing the group names.
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt
index 6540ca56be5e..16589fb6f420 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,cygnus-gpio.txt
@@ -3,8 +3,8 @@ Broadcom Cygnus GPIO/PINCONF Controller
3Required properties: 3Required properties:
4 4
5- compatible: 5- compatible:
6 Must be "brcm,cygnus-ccm-gpio", "brcm,cygnus-asiu-gpio", or 6 Must be "brcm,cygnus-ccm-gpio", "brcm,cygnus-asiu-gpio",
7 "brcm,cygnus-crmu-gpio" 7 "brcm,cygnus-crmu-gpio" or "brcm,iproc-gpio"
8 8
9- reg: 9- reg:
10 Define the base and range of the I/O address space that contains the Cygnus 10 Define the base and range of the I/O address space that contains the Cygnus
@@ -26,9 +26,13 @@ Optional properties:
26- interrupt-controller: 26- interrupt-controller:
27 Specifies that the node is an interrupt controller 27 Specifies that the node is an interrupt controller
28 28
29- pinmux: 29- gpio-ranges:
30 Specifies the phandle to the IOMUX device, where pins can be individually 30 Specifies the mapping between gpio controller and pin-controllers pins.
31muxed to GPIO 31 This requires 4 fields in cells defined as -
32 1. Phandle of pin-controller.
33 2. GPIO base pin offset.
34 3 Pin-control base pin offset.
35 4. number of gpio pins which are linearly mapped from pin base.
32 36
33Supported generic PINCONF properties in child nodes: 37Supported generic PINCONF properties in child nodes:
34 38
@@ -78,6 +82,8 @@ Example:
78 gpio-controller; 82 gpio-controller;
79 interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; 83 interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
80 interrupt-controller; 84 interrupt-controller;
85 gpio-ranges = <&pinctrl 0 42 1>,
86 <&pinctrl 1 44 3>;
81 }; 87 };
82 88
83 /* 89 /*
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
index 8bbf25d58656..457b2c68d47b 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
@@ -1,16 +1,42 @@
1* Freescale i.MX7 Dual IOMUX Controller 1* Freescale i.MX7 Dual IOMUX Controller
2 2
3iMX7D supports two iomuxc controllers: fsl,imx7d-iomuxc, which is similar to
4the one on previous iMX SoC generations, and fsl,imx7d-iomuxc-lpsr, which
5provides low power state retention capabilities on the gpios that are part of
6iomuxc-lpsr (GPIO1_IO7..GPIO1_IO0). While iomuxc-lpsr provides its own set of
7registers for mux and pad control settings, it shares the input select register
8of the main iomuxc controller for daisy chain settings; the fsl,input-sel
9property extends the fsl,imx-pinctrl driver to support the iomuxc-lpsr controller.
10
11iomuxc_lpsr: iomuxc-lpsr@302c0000 {
12 compatible = "fsl,imx7d-iomuxc-lpsr";
13 reg = <0x302c0000 0x10000>;
14 fsl,input-sel = <&iomuxc>;
15};
16
17iomuxc: iomuxc@30330000 {
18 compatible = "fsl,imx7d-iomuxc";
19 reg = <0x30330000 0x10000>;
20};
21
22Peripherals using pads from iomuxc-lpsr support the low power state retention
23mode; under LPSR mode the state of these pads is retained.
24
3Please refer to fsl,imx-pinctrl.txt in this directory for common binding part 25Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
4and usage. 26and usage.
5 27
6Required properties: 28Required properties:
7- compatible: "fsl,imx7d-iomuxc" 29- compatible: "fsl,imx7d-iomuxc" for main IOMUXC controller, or
30 "fsl,imx7d-iomuxc-lpsr" for Low Power State Retention IOMUXC controller.
8- fsl,pins: each entry consists of 6 integers and represents the mux and config 31- fsl,pins: each entry consists of 6 integers and represents the mux and config
9 setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val 32 setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
10 input_val> are specified using a PIN_FUNC_ID macro, which can be found in 33 input_val> are specified using a PIN_FUNC_ID macro, which can be found in
11 imx7d-pinfunc.h under device tree source folder. The last integer CONFIG is 34 imx7d-pinfunc.h under device tree source folder. The last integer CONFIG is
12 the pad setting value like pull-up on this pin. Please refer to i.MX7 Dual 35 the pad setting value like pull-up on this pin. Please refer to i.MX7 Dual
13 Reference Manual for detailed CONFIG settings. 36 Reference Manual for detailed CONFIG settings.
37- fsl,input-sel: required property for the iomuxc-lpsr controller; a phandle to
38 the main iomuxc controller, which provides the shared input select register
39 used for daisy chain settings.
14 40
15CONFIG bits definition: 41CONFIG bits definition:
16PAD_CTL_PUS_100K_DOWN (0 << 5) 42PAD_CTL_PUS_100K_DOWN (0 << 5)
@@ -25,3 +51,38 @@ PAD_CTL_DSE_X1 (0 << 0)
25PAD_CTL_DSE_X2 (1 << 0) 51PAD_CTL_DSE_X2 (1 << 0)
26PAD_CTL_DSE_X3 (2 << 0) 52PAD_CTL_DSE_X3 (2 << 0)
27PAD_CTL_DSE_X4 (3 << 0) 53PAD_CTL_DSE_X4 (3 << 0)
54
55Examples:
56While iomuxc-lpsr is intended to be used by dedicated peripherals to take
57advantage of the LPSR power mode, it is also possible for an IP to use pads from
58either of the iomux controllers. For example, the I2C1 IP can use the SCL pad
59from the iomuxc-lpsr controller and the SDA pad from the iomuxc controller:
60
61i2c1: i2c@30a20000 {
62 pinctrl-names = "default";
63 pinctrl-0 = <&pinctrl_i2c1_1 &pinctrl_i2c1_2>;
64 status = "okay";
65};
66
67iomuxc-lpsr@302c0000 {
68 compatible = "fsl,imx7d-iomuxc-lpsr";
69 reg = <0x302c0000 0x10000>;
70 fsl,input-sel = <&iomuxc>;
71
72 pinctrl_i2c1_1: i2c1grp-1 {
73 fsl,pins = <
74 MX7D_PAD_GPIO1_IO04__I2C1_SCL 0x4000007f
75 >;
76 };
77};
78
79iomuxc@30330000 {
80 compatible = "fsl,imx7d-iomuxc";
81 reg = <0x30330000 0x10000>;
82
83 pinctrl_i2c1_2: i2c1grp-2 {
84 fsl,pins = <
85 MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f
86 >;
87 };
88};
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 9496934528bd..ffadb7a371f6 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -19,6 +19,7 @@ Required Properties:
19 - "renesas,pfc-r8a7791": for R8A7791 (R-Car M2-W) compatible pin-controller. 19 - "renesas,pfc-r8a7791": for R8A7791 (R-Car M2-W) compatible pin-controller.
20 - "renesas,pfc-r8a7793": for R8A7793 (R-Car M2-N) compatible pin-controller. 20 - "renesas,pfc-r8a7793": for R8A7793 (R-Car M2-N) compatible pin-controller.
21 - "renesas,pfc-r8a7794": for R8A7794 (R-Car E2) compatible pin-controller. 21 - "renesas,pfc-r8a7794": for R8A7794 (R-Car E2) compatible pin-controller.
22 - "renesas,pfc-r8a7795": for R8A7795 (R-Car H3) compatible pin-controller.
22 - "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller. 23 - "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller.
23 24
24 - reg: Base address and length of each memory resource used by the pin 25 - reg: Base address and length of each memory resource used by the pin
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
index 0cf27a3544a5..80841a2d640c 100644
--- a/Documentation/edac.txt
+++ b/Documentation/edac.txt
@@ -744,6 +744,52 @@ exports one
744 possible that some errors could be lost. With rdimm's, they display the 744 possible that some errors could be lost. With rdimm's, they display the
745 contents of the registers 745 contents of the registers
746 746
747AMD64_EDAC REFERENCE DOCUMENTS USED
748-----------------------------------
749amd64_edac module is based on the following documents
750(available from http://support.amd.com/en-us/search/tech-docs):
751
7521. Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
753 Opteron Processors
754 AMD publication #: 26094
755 Revision: 3.26
756 Link: http://support.amd.com/TechDocs/26094.PDF
757
7582. Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
759 Processors
760 AMD publication #: 32559
761 Revision: 3.00
762 Issue Date: May 2006
763 Link: http://support.amd.com/TechDocs/32559.pdf
764
7653. Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
766 Processors
767 AMD publication #: 31116
768 Revision: 3.00
769 Issue Date: September 07, 2007
770 Link: http://support.amd.com/TechDocs/31116.pdf
771
7724. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
773 Models 30h-3Fh Processors
774 AMD publication #: 49125
775 Revision: 3.06
776 Issue Date: 2/12/2015 (latest release)
777 Link: http://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf
778
7795. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
780 Models 60h-6Fh Processors
781 AMD publication #: 50742
782 Revision: 3.01
783 Issue Date: 7/23/2015 (latest release)
784 Link: http://support.amd.com/TechDocs/50742_15h_Models_60h-6Fh_BKDG.pdf
785
7866. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 16h
787 Models 00h-0Fh Processors
788 AMD publication #: 48751
789 Revision: 3.03
790 Issue Date: 2/23/2015 (latest release)
791 Link: http://support.amd.com/TechDocs/48751_16h_bkdg.pdf
792
747CREDITS: 793CREDITS:
748======== 794========
749 795
diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt
index df384e3e845f..523f8307b9cd 100644
--- a/Documentation/features/vm/THP/arch-support.txt
+++ b/Documentation/features/vm/THP/arch-support.txt
@@ -7,7 +7,7 @@
7 | arch |status| 7 | arch |status|
8 ----------------------- 8 -----------------------
9 | alpha: | TODO | 9 | alpha: | TODO |
10 | arc: | .. | 10 | arc: | ok |
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | avr32: | .. | 13 | avr32: | .. |
diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt
index aaaa21db6226..3de5434c857c 100644
--- a/Documentation/features/vm/pte_special/arch-support.txt
+++ b/Documentation/features/vm/pte_special/arch-support.txt
@@ -7,7 +7,7 @@
7 | arch |status| 7 | arch |status|
8 ----------------------- 8 -----------------------
9 | alpha: | TODO | 9 | alpha: | TODO |
10 | arc: | TODO | 10 | arc: | ok |
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | avr32: | TODO | 13 | avr32: | TODO |
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index d411ca63c8b6..3a9d65c912e7 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -140,7 +140,8 @@ Table 1-1: Process specific entries in /proc
140 stat Process status 140 stat Process status
141 statm Process memory status information 141 statm Process memory status information
142 status Process status in human readable form 142 status Process status in human readable form
143 wchan If CONFIG_KALLSYMS is set, a pre-decoded wchan 143 wchan Present with CONFIG_KALLSYMS=y: it shows the kernel function
144 symbol the task is blocked in - or "0" if not blocked.
144 pagemap Page table 145 pagemap Page table
145 stack Report full stack trace, enable via CONFIG_STACKTRACE 146 stack Report full stack trace, enable via CONFIG_STACKTRACE
146 smaps a extension based on maps, showing the memory consumption of 147 smaps a extension based on maps, showing the memory consumption of
@@ -310,7 +311,7 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
310 blocked bitmap of blocked signals 311 blocked bitmap of blocked signals
311 sigign bitmap of ignored signals 312 sigign bitmap of ignored signals
312 sigcatch bitmap of caught signals 313 sigcatch bitmap of caught signals
313 wchan address where process went to sleep 314 0 (place holder, used to be the wchan address, use /proc/PID/wchan instead)
314 0 (place holder) 315 0 (place holder)
315 0 (place holder) 316 0 (place holder)
316 exit_signal signal to send to parent thread on exit 317 exit_signal signal to send to parent thread on exit
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index 90d0f6aba7a6..12a61948ec91 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -62,6 +62,11 @@ Any debugfs dump method should normally ignore signals which haven't been
62requested as GPIOs. They can use gpiochip_is_requested(), which returns either 62requested as GPIOs. They can use gpiochip_is_requested(), which returns either
63NULL or the label associated with that GPIO when it was requested. 63NULL or the label associated with that GPIO when it was requested.
64 64
65RT_FULL: a GPIO driver should not use spinlock_t or any sleepable APIs
66(like PM runtime) in its gpio_chip implementation (the .get/.set and direction
67control callbacks) if the GPIO APIs are expected to be called from atomic
68context on -RT (inside hard IRQ handlers and similar contexts). Normally this
69should not be required.
65 70
66GPIO drivers providing IRQs 71GPIO drivers providing IRQs
67--------------------------- 72---------------------------
@@ -73,6 +78,13 @@ The IRQ portions of the GPIO block are implemented using an irqchip, using
73the header <linux/irq.h>. So basically such a driver is utilizing two sub- 78the header <linux/irq.h>. So basically such a driver is utilizing two sub-
74systems simultaneously: gpio and irq. 79systems simultaneously: gpio and irq.
75 80
81RT_FULL: a GPIO driver should not use spinlock_t or any sleepable APIs
82(like PM runtime) as part of its irq_chip implementation on -RT.
83- spinlock_t should be replaced with raw_spinlock_t [1].
84- If sleepable APIs have to be used, the calls can be made from the
85 .irq_bus_lock() and .irq_bus_unlock() callbacks, as these are the only
86 slowpath callbacks on an irqchip. Create the callbacks if needed [2].
87
76GPIO irqchips usually fall in one of two categories: 88GPIO irqchips usually fall in one of two categories:
77 89
78* CHAINED GPIO irqchips: these are usually the type that is embedded on 90* CHAINED GPIO irqchips: these are usually the type that is embedded on
@@ -93,6 +105,38 @@ GPIO irqchips usually fall in one of two categories:
93 Chained GPIO irqchips typically can NOT set the .can_sleep flag on 105 Chained GPIO irqchips typically can NOT set the .can_sleep flag on
94 struct gpio_chip, as everything happens directly in the callbacks. 106 struct gpio_chip, as everything happens directly in the callbacks.
95 107
 108 RT_FULL: Note that chained IRQ handlers will not be forced threaded on -RT.
 109 As a result, spinlock_t or any sleepable APIs (like PM runtime) can't be used
 110 in a chained IRQ handler.
 111 If required (and if it can't be converted to a nested threaded GPIO irqchip),
 112 the chained IRQ handler can be converted to a generic IRQ handler; this way
 113 it will be a threaded IRQ handler on -RT and a hard IRQ handler on non-RT
 114 (for example, see [3]).
 115 Known W/A: generic_handle_irq() is expected to be called with IRQs disabled,
 116 so the IRQ core will complain if it is called from an IRQ handler which is
 117 forced threaded. A "fake" raw lock can be used to work around this problem:
118
119 raw_spinlock_t wa_lock;
120 static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
121 unsigned long wa_lock_flags;
122 raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
123 generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, bit));
124 raw_spin_unlock_irqrestore(&bank->wa_lock, wa_lock_flags);
125
126* GENERIC CHAINED GPIO irqchips: these are the same as "CHAINED GPIO irqchips",
 127 but chained IRQ handlers are not used. Instead, GPIO IRQ dispatching is
 128 performed by a generic IRQ handler which is configured using request_irq().
129 The GPIO irqchip will then end up calling something like this sequence in
130 its interrupt handler:
131
132 static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
133 for each detected GPIO IRQ
134 generic_handle_irq(...);
135
 136 RT_FULL: Such handlers will be forced threaded on -RT; as a result the IRQ
 137 core will complain that generic_handle_irq() is called with IRQs enabled, and
 138 the same W/A as for "CHAINED GPIO irqchips" can be applied.
139
96* NESTED THREADED GPIO irqchips: these are off-chip GPIO expanders and any 140* NESTED THREADED GPIO irqchips: these are off-chip GPIO expanders and any
97 other GPIO irqchip residing on the other side of a sleeping bus. Of course 141 other GPIO irqchip residing on the other side of a sleeping bus. Of course
98 such drivers that need slow bus traffic to read out IRQ status and similar, 142 such drivers that need slow bus traffic to read out IRQ status and similar,
@@ -133,6 +177,13 @@ To use the helpers please keep the following in mind:
133 the irqchip can initialize. E.g. .dev and .can_sleep shall be set up 177 the irqchip can initialize. E.g. .dev and .can_sleep shall be set up
134 properly. 178 properly.
135 179
180- Nominally set all handlers to handle_bad_irq() in the setup call and pass
181 handle_bad_irq() as the flow handler parameter in gpiochip_irqchip_add() if it
182 is expected that the irqchip .set_type() callback has to be called before
183 using/enabling a GPIO IRQ. Then set the handler to handle_level_irq()
184 and/or handle_edge_irq() in the irqchip .set_type() callback depending on
185 what your controller supports.
186
136It is legal for any IRQ consumer to request an IRQ from any irqchip no matter 187It is legal for any IRQ consumer to request an IRQ from any irqchip no matter
137if that is a combined GPIO+IRQ driver. The basic premise is that gpio_chip and 188if that is a combined GPIO+IRQ driver. The basic premise is that gpio_chip and
138irq_chip are orthogonal, and offering their services independent of each 189irq_chip are orthogonal, and offering their services independent of each
@@ -169,6 +220,31 @@ When implementing an irqchip inside a GPIO driver, these two functions should
169typically be called in the .startup() and .shutdown() callbacks from the 220typically be called in the .startup() and .shutdown() callbacks from the
170irqchip. 221irqchip.
171 222
223Real-Time compliance for GPIO IRQ chips
224---------------------------------------
225
226Any provider of irqchips needs to be carefully tailored to support Real Time
227preemption. It is desirable that all irqchips in the GPIO subsystem keep this
228in mind and do the proper testing to assure they are real time-enabled.
229So please pay attention to the "RT_FULL:" notes above.
230The following is a checklist to follow when preparing a driver for real
231time-compliance:
232
233- ensure spinlock_t is not used as part of the irq_chip implementation;
234- ensure that sleepable APIs are not used as part of the irq_chip implementation.
235 If sleepable APIs have to be used, the calls can be made from the .irq_bus_lock()
236 and .irq_bus_unlock() callbacks;
237- Chained GPIO irqchips: ensure spinlock_t or any sleepable APIs are not used
238 from the chained IRQ handler;
239- Generic chained GPIO irqchips: take care with generic_handle_irq() calls and
240 apply the corresponding W/A;
241- Chained GPIO irqchips: get rid of the chained IRQ handler and use a generic
242 IRQ handler if possible;
243- regmap_mmio: sorry, but you are in trouble if an MMIO regmap is used for the
244 GPIO IRQ chip implementation;
245- Test your driver with the appropriate in-kernel real time test cases for both
246 level and edge IRQs.
247
172 248
173Requesting self-owned GPIO pins 249Requesting self-owned GPIO pins
174------------------------------- 250-------------------------------
@@ -190,3 +266,7 @@ gpiochip_free_own_desc().
190These functions must be used with care since they do not affect module use 266These functions must be used with care since they do not affect module use
191count. Do not use the functions to request gpio descriptors not owned by the 267count. Do not use the functions to request gpio descriptors not owned by the
192calling driver. 268calling driver.
269
270[1] http://www.spinics.net/lists/linux-omap/msg120425.html
271[2] https://lkml.org/lkml/2015/9/25/494
272[3] https://lkml.org/lkml/2015/9/25/495
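
As a rough, hypothetical sketch of the "fake" raw lock work-around described in
the RT_FULL notes added to driver.txt above, a generic chained GPIO IRQ handler
might look like the following (the bank structure, register offset and function
names are invented for the example, not taken from any in-tree driver; the
wa_lock would be initialized with raw_spin_lock_init() at probe time):

	#include <linux/bitops.h>
	#include <linux/gpio/driver.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/spinlock.h>

	#define MY_GPIO_IRQSTATUS	0x18	/* hypothetical status register */

	struct my_gpio_bank {
		void __iomem *base;
		struct gpio_chip chip;
		raw_spinlock_t wa_lock;	/* held only around generic_handle_irq() */
	};

	static irqreturn_t my_gpio_irq_handler(int irq, void *dev_id)
	{
		struct my_gpio_bank *bank = dev_id;
		unsigned long status, flags;
		int bit;

		status = readl(bank->base + MY_GPIO_IRQSTATUS);

		for_each_set_bit(bit, &status, 32) {
			/*
			 * On -RT this handler runs in a thread, so the IRQ core
			 * would warn that generic_handle_irq() is called with
			 * interrupts enabled; the raw lock keeps them disabled
			 * across the call.
			 */
			raw_spin_lock_irqsave(&bank->wa_lock, flags);
			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, bit));
			raw_spin_unlock_irqrestore(&bank->wa_lock, flags);
		}

		return status ? IRQ_HANDLED : IRQ_NONE;
	}
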
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75
index 67691a0aa41d..ac95edfcd907 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75
@@ -42,8 +42,8 @@ Supported chips:
42 Addresses scanned: none 42 Addresses scanned: none
43 Datasheet: Publicly available at the ST website 43 Datasheet: Publicly available at the ST website
44 http://www.st.com/internet/analog/product/121769.jsp 44 http://www.st.com/internet/analog/product/121769.jsp
45 * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP175, TMP275 45 * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75C, TMP175, TMP275
46 Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp275' 46 Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75c', 'tmp275'
47 Addresses scanned: none 47 Addresses scanned: none
48 Datasheet: Publicly available at the Texas Instruments website 48 Datasheet: Publicly available at the Texas Instruments website
49 http://www.ti.com/product/tmp100 49 http://www.ti.com/product/tmp100
@@ -51,6 +51,7 @@ Supported chips:
51 http://www.ti.com/product/tmp105 51 http://www.ti.com/product/tmp105
52 http://www.ti.com/product/tmp112 52 http://www.ti.com/product/tmp112
53 http://www.ti.com/product/tmp75 53 http://www.ti.com/product/tmp75
54 http://www.ti.com/product/tmp75c
54 http://www.ti.com/product/tmp175 55 http://www.ti.com/product/tmp175
55 http://www.ti.com/product/tmp275 56 http://www.ti.com/product/tmp275
56 * NXP LM75B 57 * NXP LM75B
diff --git a/Documentation/hwmon/max31790 b/Documentation/hwmon/max31790
new file mode 100644
index 000000000000..855e62430da9
--- /dev/null
+++ b/Documentation/hwmon/max31790
@@ -0,0 +1,37 @@
1Kernel driver max31790
2======================
3
4Supported chips:
5 * Maxim MAX31790
6 Prefix: 'max31790'
7 Addresses scanned: -
8 Datasheet: http://pdfserv.maximintegrated.com/en/ds/MAX31790.pdf
9
10Author: Il Han <corone.il.han@gmail.com>
11
12
13Description
14-----------
15
16This driver implements support for the Maxim MAX31790 chip.
17
18The MAX31790 controls the speeds of up to six fans using six independent
19PWM outputs. The desired fan speeds (or PWM duty cycles) are written
20through the I2C interface. The outputs drive "4-wire" fans directly,
21or can be used to modulate the fan's power terminals using an external
22pass transistor.
23
24Tachometer inputs monitor fan tachometer logic outputs for precise (+/-1%)
25monitoring and control of fan RPM as well as detection of fan failure.
26Six pins are dedicated tachometer inputs. Any of the six PWM outputs can
27also be configured to serve as tachometer inputs.
28
29
30Sysfs entries
31-------------
32
33fan[1-12]_input RO fan tachometer speed in RPM
34fan[1-12]_fault RO fan experienced fault
35fan[1-6]_target RW desired fan speed in RPM
36pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
37pwm[1-6] RW fan target duty cycle (0-255)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 22a4b687ea5b..046832ef14ce 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1094,6 +1094,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1094 you are really sure that your UEFI does sane gc and 1094 you are really sure that your UEFI does sane gc and
1095 fulfills the spec otherwise your board may brick. 1095 fulfills the spec otherwise your board may brick.
1096 1096
1097 efi_fake_mem= nn[KMG]@ss[KMG]:aa[,nn[KMG]@ss[KMG]:aa,..] [EFI; X86]
 1098 Add an arbitrary attribute to a specific memory range
 1099 by updating the original EFI memory map.
 1100 The attribute aa is added to the region of memory
 1101 that runs from ss to ss+nn.
1102 If efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000
1103 is specified, EFI_MEMORY_MORE_RELIABLE(0x10000)
1104 attribute is added to range 0x100000000-0x180000000 and
1105 0x10a0000000-0x1120000000.
1106
 1107 Using this parameter you can debug EFI memmap
 1108 related features. For example, you can debug the
 1109 Address Range Mirroring feature even if your box
 1110 doesn't support it.
1111
1097 eisa_irq_edge= [PARISC,HW] 1112 eisa_irq_edge= [PARISC,HW]
1098 See header of drivers/parisc/eisa.c. 1113 See header of drivers/parisc/eisa.c.
1099 1114
@@ -3074,9 +3089,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3074 cache-to-cache transfer latencies. 3089 cache-to-cache transfer latencies.
3075 3090
3076 rcutree.rcu_fanout_leaf= [KNL] 3091 rcutree.rcu_fanout_leaf= [KNL]
3077 Increase the number of CPUs assigned to each 3092 Change the number of CPUs assigned to each
3078 leaf rcu_node structure. Useful for very large 3093 leaf rcu_node structure. Useful for very
3079 systems. 3094 large systems, which will choose the value 64,
3095 and for NUMA systems with large remote-access
3096 latencies, which will choose a value aligned
3097 with the appropriate hardware boundaries.
3080 3098
3081 rcutree.jiffies_till_sched_qs= [KNL] 3099 rcutree.jiffies_till_sched_qs= [KNL]
3082 Set required age in jiffies for a 3100 Set required age in jiffies for a
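
The range arithmetic in the efi_fake_mem= description added above can be sanity
checked with a small user-space C sketch (not kernel code; values taken from the
example in the text):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long nn = 2ULL << 30;	/* 2G (size)  */
		unsigned long long ss = 4ULL << 30;	/* 4G (start) */
		unsigned long long aa = 0x10000;	/* EFI_MEMORY_MORE_RELIABLE */

		/* The attribute aa applies to the range [ss, ss + nn). */
		printf("attr 0x%llx added to 0x%llx-0x%llx\n", aa, ss, ss + nn);
		/* prints: attr 0x10000 added to 0x100000000-0x180000000 */
		return 0;
	}
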
diff --git a/Documentation/locking/lockstat.txt b/Documentation/locking/lockstat.txt
index 568bbbacee91..5786ad2cd5e6 100644
--- a/Documentation/locking/lockstat.txt
+++ b/Documentation/locking/lockstat.txt
@@ -12,7 +12,7 @@ Because things like lock contention can severely impact performance.
12- HOW 12- HOW
13 13
14Lockdep already has hooks in the lock functions and maps lock instances to 14Lockdep already has hooks in the lock functions and maps lock instances to
15lock classes. We build on that (see Documentation/lokcing/lockdep-design.txt). 15lock classes. We build on that (see Documentation/locking/lockdep-design.txt).
16The graph below shows the relation between the lock functions and the various 16The graph below shows the relation between the lock functions and the various
17hooks therein. 17hooks therein.
18 18
diff --git a/Documentation/locking/locktorture.txt b/Documentation/locking/locktorture.txt
index 619f2bb136a5..a2ef3a929bf1 100644
--- a/Documentation/locking/locktorture.txt
+++ b/Documentation/locking/locktorture.txt
@@ -52,6 +52,9 @@ torture_type Type of lock to torture. By default, only spinlocks will
52 52
53 o "mutex_lock": mutex_lock() and mutex_unlock() pairs. 53 o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
54 54
55 o "rtmutex_lock": rtmutex_lock() and rtmutex_unlock()
56 pairs. Kernel must have CONFIG_RT_MUTEX=y.
57
55 o "rwsem_lock": read/write down() and up() semaphore pairs. 58 o "rwsem_lock": read/write down() and up() semaphore pairs.
56 59
57torture_runnable Start locktorture at boot time in the case where the 60torture_runnable Start locktorture at boot time in the case where the
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 2ba8461b0631..aef9487303d0 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -617,16 +617,16 @@ case what's actually required is:
617However, stores are not speculated. This means that ordering -is- provided 617However, stores are not speculated. This means that ordering -is- provided
618for load-store control dependencies, as in the following example: 618for load-store control dependencies, as in the following example:
619 619
620 q = READ_ONCE_CTRL(a); 620 q = READ_ONCE(a);
621 if (q) { 621 if (q) {
622 WRITE_ONCE(b, p); 622 WRITE_ONCE(b, p);
623 } 623 }
624 624
625Control dependencies pair normally with other types of barriers. That 625Control dependencies pair normally with other types of barriers. That
626said, please note that READ_ONCE_CTRL() is not optional! Without the 626said, please note that READ_ONCE() is not optional! Without the
627READ_ONCE_CTRL(), the compiler might combine the load from 'a' with 627READ_ONCE(), the compiler might combine the load from 'a' with other
628other loads from 'a', and the store to 'b' with other stores to 'b', 628loads from 'a', and the store to 'b' with other stores to 'b', with
629with possible highly counterintuitive effects on ordering. 629possible highly counterintuitive effects on ordering.
630 630
631Worse yet, if the compiler is able to prove (say) that the value of 631Worse yet, if the compiler is able to prove (say) that the value of
632variable 'a' is always non-zero, it would be well within its rights 632variable 'a' is always non-zero, it would be well within its rights
@@ -636,15 +636,12 @@ as follows:
636 q = a; 636 q = a;
637 b = p; /* BUG: Compiler and CPU can both reorder!!! */ 637 b = p; /* BUG: Compiler and CPU can both reorder!!! */
638 638
639Finally, the READ_ONCE_CTRL() includes an smp_read_barrier_depends() 639So don't leave out the READ_ONCE().
640that DEC Alpha needs in order to respect control depedencies.
641
642So don't leave out the READ_ONCE_CTRL().
643 640
644It is tempting to try to enforce ordering on identical stores on both 641It is tempting to try to enforce ordering on identical stores on both
645branches of the "if" statement as follows: 642branches of the "if" statement as follows:
646 643
647 q = READ_ONCE_CTRL(a); 644 q = READ_ONCE(a);
648 if (q) { 645 if (q) {
649 barrier(); 646 barrier();
650 WRITE_ONCE(b, p); 647 WRITE_ONCE(b, p);
@@ -658,7 +655,7 @@ branches of the "if" statement as follows:
658Unfortunately, current compilers will transform this as follows at high 655Unfortunately, current compilers will transform this as follows at high
659optimization levels: 656optimization levels:
660 657
661 q = READ_ONCE_CTRL(a); 658 q = READ_ONCE(a);
662 barrier(); 659 barrier();
663 WRITE_ONCE(b, p); /* BUG: No ordering vs. load from a!!! */ 660 WRITE_ONCE(b, p); /* BUG: No ordering vs. load from a!!! */
664 if (q) { 661 if (q) {
@@ -688,7 +685,7 @@ memory barriers, for example, smp_store_release():
688In contrast, without explicit memory barriers, two-legged-if control 685In contrast, without explicit memory barriers, two-legged-if control
689ordering is guaranteed only when the stores differ, for example: 686ordering is guaranteed only when the stores differ, for example:
690 687
691 q = READ_ONCE_CTRL(a); 688 q = READ_ONCE(a);
692 if (q) { 689 if (q) {
693 WRITE_ONCE(b, p); 690 WRITE_ONCE(b, p);
694 do_something(); 691 do_something();
@@ -697,14 +694,14 @@ ordering is guaranteed only when the stores differ, for example:
697 do_something_else(); 694 do_something_else();
698 } 695 }
699 696
700The initial READ_ONCE_CTRL() is still required to prevent the compiler 697The initial READ_ONCE() is still required to prevent the compiler from
701from proving the value of 'a'. 698proving the value of 'a'.
702 699
703In addition, you need to be careful what you do with the local variable 'q', 700In addition, you need to be careful what you do with the local variable 'q',
704otherwise the compiler might be able to guess the value and again remove 701otherwise the compiler might be able to guess the value and again remove
705the needed conditional. For example: 702the needed conditional. For example:
706 703
707 q = READ_ONCE_CTRL(a); 704 q = READ_ONCE(a);
708 if (q % MAX) { 705 if (q % MAX) {
709 WRITE_ONCE(b, p); 706 WRITE_ONCE(b, p);
710 do_something(); 707 do_something();
@@ -717,7 +714,7 @@ If MAX is defined to be 1, then the compiler knows that (q % MAX) is
717equal to zero, in which case the compiler is within its rights to 714equal to zero, in which case the compiler is within its rights to
718transform the above code into the following: 715transform the above code into the following:
719 716
720 q = READ_ONCE_CTRL(a); 717 q = READ_ONCE(a);
721 WRITE_ONCE(b, p); 718 WRITE_ONCE(b, p);
722 do_something_else(); 719 do_something_else();
723 720
@@ -728,7 +725,7 @@ is gone, and the barrier won't bring it back. Therefore, if you are
728relying on this ordering, you should make sure that MAX is greater than 725relying on this ordering, you should make sure that MAX is greater than
729one, perhaps as follows: 726one, perhaps as follows:
730 727
731 q = READ_ONCE_CTRL(a); 728 q = READ_ONCE(a);
732 BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */ 729 BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
733 if (q % MAX) { 730 if (q % MAX) {
734 WRITE_ONCE(b, p); 731 WRITE_ONCE(b, p);
@@ -745,7 +742,7 @@ of the 'if' statement.
745You must also be careful not to rely too much on boolean short-circuit 742You must also be careful not to rely too much on boolean short-circuit
746evaluation. Consider this example: 743evaluation. Consider this example:
747 744
748 q = READ_ONCE_CTRL(a); 745 q = READ_ONCE(a);
749 if (q || 1 > 0) 746 if (q || 1 > 0)
750 WRITE_ONCE(b, 1); 747 WRITE_ONCE(b, 1);
751 748
@@ -753,7 +750,7 @@ Because the first condition cannot fault and the second condition is
753always true, the compiler can transform this example as following, 750always true, the compiler can transform this example as following,
754defeating control dependency: 751defeating control dependency:
755 752
756 q = READ_ONCE_CTRL(a); 753 q = READ_ONCE(a);
757 WRITE_ONCE(b, 1); 754 WRITE_ONCE(b, 1);
758 755
759This example underscores the need to ensure that the compiler cannot 756This example underscores the need to ensure that the compiler cannot
@@ -767,7 +764,7 @@ x and y both being zero:
767 764
768 CPU 0 CPU 1 765 CPU 0 CPU 1
769 ======================= ======================= 766 ======================= =======================
770 r1 = READ_ONCE_CTRL(x); r2 = READ_ONCE_CTRL(y); 767 r1 = READ_ONCE(x); r2 = READ_ONCE(y);
771 if (r1 > 0) if (r2 > 0) 768 if (r1 > 0) if (r2 > 0)
772 WRITE_ONCE(y, 1); WRITE_ONCE(x, 1); 769 WRITE_ONCE(y, 1); WRITE_ONCE(x, 1);
773 770
@@ -796,11 +793,6 @@ site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
796 793
797In summary: 794In summary:
798 795
799 (*) Control dependencies must be headed by READ_ONCE_CTRL().
800 Or, as a much less preferable alternative, interpose
801 smp_read_barrier_depends() between a READ_ONCE() and the
802 control-dependent write.
803
804 (*) Control dependencies can order prior loads against later stores. 796 (*) Control dependencies can order prior loads against later stores.
805 However, they do -not- guarantee any other sort of ordering: 797 However, they do -not- guarantee any other sort of ordering:
806 Not prior loads against later loads, nor prior stores against 798 Not prior loads against later loads, nor prior stores against
@@ -816,14 +808,13 @@ In summary:
816 between the prior load and the subsequent store, and this 808 between the prior load and the subsequent store, and this
817 conditional must involve the prior load. If the compiler is able 809 conditional must involve the prior load. If the compiler is able
818 to optimize the conditional away, it will have also optimized 810 to optimize the conditional away, it will have also optimized
819 away the ordering. Careful use of READ_ONCE_CTRL() READ_ONCE(), 811 away the ordering. Careful use of READ_ONCE() and WRITE_ONCE()
820 and WRITE_ONCE() can help to preserve the needed conditional. 812 can help to preserve the needed conditional.
821 813
822 (*) Control dependencies require that the compiler avoid reordering the 814 (*) Control dependencies require that the compiler avoid reordering the
823 dependency into nonexistence. Careful use of READ_ONCE_CTRL() 815 dependency into nonexistence. Careful use of READ_ONCE() or
824 or smp_read_barrier_depends() can help to preserve your control 816 atomic{,64}_read() can help to preserve your control dependency.
825 dependency. Please see the Compiler Barrier section for more 817 Please see the Compiler Barrier section for more information.
826 information.
827 818
828 (*) Control dependencies pair normally with other types of barriers. 819 (*) Control dependencies pair normally with other types of barriers.
829 820
@@ -1710,6 +1701,17 @@ There are some more advanced barrier functions:
1710 operations" subsection for information on where to use these. 1701 operations" subsection for information on where to use these.
1711 1702
1712 1703
1704 (*) lockless_dereference();
1705 This can be thought of as a pointer-fetch wrapper around the
1706 smp_read_barrier_depends() data-dependency barrier.
1707
 1708 This is also similar to rcu_dereference(), but is intended for cases
 1709 where object lifetime is handled by some mechanism other than RCU, for
 1710 example, when the objects are removed only when the system goes down.
1711 In addition, lockless_dereference() is used in some data structures
1712 that can be used both with and without RCU.
1713
1714
1713 (*) dma_wmb(); 1715 (*) dma_wmb();
1714 (*) dma_rmb(); 1716 (*) dma_rmb();
1715 1717
@@ -1789,7 +1791,6 @@ The Linux kernel has a number of locking constructs:
1789 (*) mutexes 1791 (*) mutexes
1790 (*) semaphores 1792 (*) semaphores
1791 (*) R/W semaphores 1793 (*) R/W semaphores
1792 (*) RCU
1793 1794
1794In all cases there are variants on "ACQUIRE" operations and "RELEASE" operations 1795In all cases there are variants on "ACQUIRE" operations and "RELEASE" operations
1795for each construct. These operations all imply certain barriers: 1796for each construct. These operations all imply certain barriers:
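
To make the lockless_dereference() entry added above a little more concrete,
here is a hypothetical publish/consume fragment in the same style as the other
snippets in memory-barriers.txt (struct foo, gbl_foo and both functions are
invented for the example; the object here is never freed, i.e. its lifetime is
not managed by RCU):

	struct foo {
		int a;
	};

	struct foo *gbl_foo;

	/* Publisher: initialize the object, then make the pointer visible. */
	void publish_foo(struct foo *p)
	{
		p->a = 42;
		smp_store_release(&gbl_foo, p);
	}

	/* Consumer: the data dependency orders the pointer load before p->a. */
	int read_foo_a(void)
	{
		struct foo *p = lockless_dereference(gbl_foo);

		return p ? p->a : -1;
	}
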
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 189bab09255a..caa555706f89 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -72,13 +72,3 @@ Note on raw_rpmb_size_mult:
72 "raw_rpmb_size_mult" is a mutliple of 128kB block. 72 "raw_rpmb_size_mult" is a mutliple of 128kB block.
73 RPMB size in byte is calculated by using the following equation: 73 RPMB size in byte is calculated by using the following equation:
74 RPMB partition size = 128kB x raw_rpmb_size_mult 74 RPMB partition size = 128kB x raw_rpmb_size_mult
75
76SD/MMC/SDIO Clock Gating Attribute
77==================================
78
79Read and write access is provided to following attribute.
80This attribute appears only if CONFIG_MMC_CLKGATE is enabled.
81
82 clkgate_delay Tune the clock gating delay with desired value in milliseconds.
83
84echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
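
Purely as an illustration of the RPMB size equation retained above (the helper
name is made up, this is not an in-tree function):

	/* RPMB partition size = 128kB x raw_rpmb_size_mult */
	static unsigned long long rpmb_size_bytes(unsigned int raw_rpmb_size_mult)
	{
		return 128ULL * 1024 * raw_rpmb_size_mult;
	}

	/* e.g. raw_rpmb_size_mult == 32  ->  32 x 128kB = 4 MiB RPMB partition */
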
diff --git a/MAINTAINERS b/MAINTAINERS
index fb7d2e4af200..dcc8ed6fccde 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -240,6 +240,12 @@ L: lm-sensors@lm-sensors.org
240S: Maintained 240S: Maintained
241F: drivers/hwmon/abituguru3.c 241F: drivers/hwmon/abituguru3.c
242 242
243ACCES 104-IDIO-16 GPIO DRIVER
244M: "William Breathitt Gray" <vilhelm.gray@gmail.com>
245L: linux-gpio@vger.kernel.org
246S: Maintained
247F: drivers/gpio/gpio-104-idio-16.c
248
243ACENIC DRIVER 249ACENIC DRIVER
244M: Jes Sorensen <jes@trained-monkey.org> 250M: Jes Sorensen <jes@trained-monkey.org>
245L: linux-acenic@sunsite.dk 251L: linux-acenic@sunsite.dk
@@ -654,11 +660,6 @@ F: drivers/gpu/drm/radeon/radeon_kfd.c
654F: drivers/gpu/drm/radeon/radeon_kfd.h 660F: drivers/gpu/drm/radeon/radeon_kfd.h
655F: include/uapi/linux/kfd_ioctl.h 661F: include/uapi/linux/kfd_ioctl.h
656 662
657AMD MICROCODE UPDATE SUPPORT
658M: Borislav Petkov <bp@alien8.de>
659S: Maintained
660F: arch/x86/kernel/cpu/microcode/amd*
661
662AMD XGBE DRIVER 663AMD XGBE DRIVER
663M: Tom Lendacky <thomas.lendacky@amd.com> 664M: Tom Lendacky <thomas.lendacky@amd.com>
664L: netdev@vger.kernel.org 665L: netdev@vger.kernel.org
@@ -894,11 +895,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
894L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 895L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
895S: Maintained 896S: Maintained
896 897
897ARM/Allwinner A1X SoC support 898ARM/Allwinner sunXi SoC support
898M: Maxime Ripard <maxime.ripard@free-electrons.com> 899M: Maxime Ripard <maxime.ripard@free-electrons.com>
900M: Chen-Yu Tsai <wens@csie.org>
899L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 901L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
900S: Maintained 902S: Maintained
901N: sun[x4567]i 903N: sun[x456789]i
902 904
903ARM/Allwinner SoC Clock Support 905ARM/Allwinner SoC Clock Support
904M: Emilio López <emilio@elopez.com.ar> 906M: Emilio López <emilio@elopez.com.ar>
@@ -1779,6 +1781,14 @@ S: Supported
1779F: Documentation/aoe/ 1781F: Documentation/aoe/
1780F: drivers/block/aoe/ 1782F: drivers/block/aoe/
1781 1783
1784ATHEROS 71XX/9XXX GPIO DRIVER
1785M: Alban Bedel <albeu@free.fr>
1786W: https://github.com/AlbanBedel/linux
1787T: git git://github.com/AlbanBedel/linux
1788S: Maintained
1789F: drivers/gpio/gpio-ath79.c
1790F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt
1791
1782ATHEROS ATH GENERIC UTILITIES 1792ATHEROS ATH GENERIC UTILITIES
1783M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com> 1793M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
1784L: linux-wireless@vger.kernel.org 1794L: linux-wireless@vger.kernel.org
@@ -4427,6 +4437,14 @@ L: linuxppc-dev@lists.ozlabs.org
4427S: Maintained 4437S: Maintained
4428F: drivers/net/ethernet/freescale/ucc_geth* 4438F: drivers/net/ethernet/freescale/ucc_geth*
4429 4439
4440FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
4441M: Claudiu Manoil <claudiu.manoil@freescale.com>
4442L: netdev@vger.kernel.org
4443S: Maintained
4444F: drivers/net/ethernet/freescale/gianfar*
4445X: drivers/net/ethernet/freescale/gianfar_ptp.c
4446F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
4447
4430FREESCALE QUICC ENGINE UCC UART DRIVER 4448FREESCALE QUICC ENGINE UCC UART DRIVER
4431M: Timur Tabi <timur@tabi.org> 4449M: Timur Tabi <timur@tabi.org>
4432L: linuxppc-dev@lists.ozlabs.org 4450L: linuxppc-dev@lists.ozlabs.org
@@ -5445,12 +5463,6 @@ W: https://01.org/linux-acpi
5445S: Supported 5463S: Supported
5446F: drivers/platform/x86/intel_menlow.c 5464F: drivers/platform/x86/intel_menlow.c
5447 5465
5448INTEL IA32 MICROCODE UPDATE SUPPORT
5449M: Borislav Petkov <bp@alien8.de>
5450S: Maintained
5451F: arch/x86/kernel/cpu/microcode/core*
5452F: arch/x86/kernel/cpu/microcode/intel*
5453
5454INTEL I/OAT DMA DRIVER 5466INTEL I/OAT DMA DRIVER
5455M: Dave Jiang <dave.jiang@intel.com> 5467M: Dave Jiang <dave.jiang@intel.com>
5456R: Dan Williams <dan.j.williams@intel.com> 5468R: Dan Williams <dan.j.williams@intel.com>
@@ -8172,6 +8184,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8172S: Maintained 8184S: Maintained
8173F: drivers/pinctrl/pinctrl-at91.* 8185F: drivers/pinctrl/pinctrl-at91.*
8174 8186
8187PIN CONTROLLER - ATMEL AT91 PIO4
8188M: Ludovic Desroches <ludovic.desroches@atmel.com>
8189L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8190L: linux-gpio@vger.kernel.org
8191S: Supported
8192F: drivers/pinctrl/pinctrl-at91-pio4.*
8193
8175PIN CONTROLLER - INTEL 8194PIN CONTROLLER - INTEL
8176M: Mika Westerberg <mika.westerberg@linux.intel.com> 8195M: Mika Westerberg <mika.westerberg@linux.intel.com>
8177M: Heikki Krogerus <heikki.krogerus@linux.intel.com> 8196M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
@@ -10091,6 +10110,7 @@ F: include/net/switchdev.h
10091 10110
10092SYNOPSYS ARC ARCHITECTURE 10111SYNOPSYS ARC ARCHITECTURE
10093M: Vineet Gupta <vgupta@synopsys.com> 10112M: Vineet Gupta <vgupta@synopsys.com>
10113L: linux-snps-arc@lists.infradead.org
10094S: Supported 10114S: Supported
10095F: arch/arc/ 10115F: arch/arc/
10096F: Documentation/devicetree/bindings/arc/* 10116F: Documentation/devicetree/bindings/arc/*
@@ -11474,6 +11494,11 @@ L: linux-edac@vger.kernel.org
11474S: Maintained 11494S: Maintained
11475F: arch/x86/kernel/cpu/mcheck/* 11495F: arch/x86/kernel/cpu/mcheck/*
11476 11496
11497X86 MICROCODE UPDATE SUPPORT
11498M: Borislav Petkov <bp@alien8.de>
11499S: Maintained
11500F: arch/x86/kernel/cpu/microcode/*
11501
11477X86 VDSO 11502X86 VDSO
11478M: Andy Lutomirski <luto@amacapital.net> 11503M: Andy Lutomirski <luto@amacapital.net>
11479L: linux-kernel@vger.kernel.org 11504L: linux-kernel@vger.kernel.org
@@ -11674,6 +11699,7 @@ F: drivers/tty/serial/zs.*
11674ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR 11699ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
11675M: Minchan Kim <minchan@kernel.org> 11700M: Minchan Kim <minchan@kernel.org>
11676M: Nitin Gupta <ngupta@vflare.org> 11701M: Nitin Gupta <ngupta@vflare.org>
11702R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
11677L: linux-mm@kvack.org 11703L: linux-mm@kvack.org
11678S: Maintained 11704S: Maintained
11679F: mm/zsmalloc.c 11705F: mm/zsmalloc.c
diff --git a/Makefile b/Makefile
index d33ab74bffce..d5b37391195f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 3 2PATCHLEVEL = 3
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION =
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e8c956098424..572b228c44c7 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,11 +17,11 @@
17#define ATOMIC_INIT(i) { (i) } 17#define ATOMIC_INIT(i) { (i) }
18#define ATOMIC64_INIT(i) { (i) } 18#define ATOMIC64_INIT(i) { (i) }
19 19
20#define atomic_read(v) ACCESS_ONCE((v)->counter) 20#define atomic_read(v) READ_ONCE((v)->counter)
21#define atomic64_read(v) ACCESS_ONCE((v)->counter) 21#define atomic64_read(v) READ_ONCE((v)->counter)
22 22
23#define atomic_set(v,i) ((v)->counter = (i)) 23#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
24#define atomic64_set(v,i) ((v)->counter = (i)) 24#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
25 25
26/* 26/*
27 * To get proper branch prediction for the main line, we must branch 27 * To get proper branch prediction for the main line, we must branch
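
A hypothetical illustration (not from the tree) of why atomic_read() and
atomic_set() are now built on READ_ONCE()/WRITE_ONCE(): without them the
compiler would be free to hoist the load out of the loop below, or to tear or
fuse the store.

	#include <linux/atomic.h>
	#include <asm/processor.h>	/* cpu_relax() */

	static atomic_t stop_flag = ATOMIC_INIT(0);

	void wait_for_stop(void)
	{
		/* READ_ONCE() inside atomic_read() forces a fresh load each pass. */
		while (!atomic_read(&stop_flag))
			cpu_relax();
	}

	void request_stop(void)
	{
		/* WRITE_ONCE() inside atomic_set() prevents store tearing/fusing. */
		atomic_set(&stop_flag, 1);
	}
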
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 78c0621d5819..2c2ac3f3ff80 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -76,6 +76,10 @@ config STACKTRACE_SUPPORT
76config HAVE_LATENCYTOP_SUPPORT 76config HAVE_LATENCYTOP_SUPPORT
77 def_bool y 77 def_bool y
78 78
79config HAVE_ARCH_TRANSPARENT_HUGEPAGE
80 def_bool y
81 depends on ARC_MMU_V4
82
79source "init/Kconfig" 83source "init/Kconfig"
80source "kernel/Kconfig.freezer" 84source "kernel/Kconfig.freezer"
81 85
@@ -190,6 +194,16 @@ config NR_CPUS
190 range 2 4096 194 range 2 4096
191 default "4" 195 default "4"
192 196
197config ARC_SMP_HALT_ON_RESET
198 bool "Enable Halt-on-reset boot mode"
199 default y if ARC_UBOOT_SUPPORT
200 help
 201 In SMP configuration cores can be configured as Halt-on-reset
 202 or they could all start at the same time. For Halt-on-reset, the
 203 non-masters are parked until the Master kicks them, so they can
 204 start off at the designated entry point. Otherwise, all cores jump
 205 to a common entry point and spin-wait for the Master's signal.
206
193endif #SMP 207endif #SMP
194 208
195menuconfig ARC_CACHE 209menuconfig ARC_CACHE
@@ -278,6 +292,8 @@ choice
278 default ARC_MMU_V2 if ARC_CPU_750D 292 default ARC_MMU_V2 if ARC_CPU_750D
279 default ARC_MMU_V4 if ARC_CPU_HS 293 default ARC_MMU_V4 if ARC_CPU_HS
280 294
295if ISA_ARCOMPACT
296
281config ARC_MMU_V1 297config ARC_MMU_V1
282 bool "MMU v1" 298 bool "MMU v1"
283 help 299 help
@@ -297,6 +313,8 @@ config ARC_MMU_V3
297 Variable Page size (1k-16k), var JTLB size 128 x (2 or 4) 313 Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
298 Shared Address Spaces (SASID) 314 Shared Address Spaces (SASID)
299 315
316endif
317
300config ARC_MMU_V4 318config ARC_MMU_V4
301 bool "MMU v4" 319 bool "MMU v4"
302 depends on ISA_ARCV2 320 depends on ISA_ARCV2
@@ -428,6 +446,28 @@ config LINUX_LINK_BASE
428 Linux needs to be scooted a bit. 446 Linux needs to be scooted a bit.
429 If you don't know what the above means, leave this setting alone. 447 If you don't know what the above means, leave this setting alone.
430 448
449config HIGHMEM
450 bool "High Memory Support"
451 help
 452 With the ARC 2G:2G address split, only the upper 2G is directly addressable
 453 by the kernel. Enable this to potentially allow access to the rest of the 2G,
 454 and to PAE in the future.
455
456config ARC_HAS_PAE40
457 bool "Support for the 40-bit Physical Address Extension"
458 default n
459 depends on ISA_ARCV2
460 select HIGHMEM
461 help
462 Enable access to physical memory beyond 4G, only supported on
 463 ARC cores with 40-bit Physical Addressing support.
464
465config ARCH_PHYS_ADDR_T_64BIT
466 def_bool ARC_HAS_PAE40
467
468config ARCH_DMA_ADDR_T_64BIT
469 bool
470
431config ARC_CURR_IN_REG 471config ARC_CURR_IN_REG
432 bool "Dedicate Register r25 for current_task pointer" 472 bool "Dedicate Register r25 for current_task pointer"
433 default y 473 default y
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index a5e2726a067e..420dcfde289f 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -95,6 +95,6 @@
95 #size-cells = <1>; 95 #size-cells = <1>;
96 ranges = <0x00000000 0x80000000 0x40000000>; 96 ranges = <0x00000000 0x80000000 0x40000000>;
97 device_type = "memory"; 97 device_type = "memory";
98 reg = <0x00000000 0x20000000>; /* 512MiB */ 98 reg = <0x80000000 0x20000000>; /* 512MiB */
99 }; 99 };
100}; 100};
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 846481f37eef..f90fadf7f94e 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -98,6 +98,6 @@
98 #size-cells = <1>; 98 #size-cells = <1>;
99 ranges = <0x00000000 0x80000000 0x40000000>; 99 ranges = <0x00000000 0x80000000 0x40000000>;
100 device_type = "memory"; 100 device_type = "memory";
101 reg = <0x00000000 0x20000000>; /* 512MiB */ 101 reg = <0x80000000 0x20000000>; /* 512MiB */
102 }; 102 };
103}; 103};
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 2f0b33257db2..06a9f294a2e6 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -121,6 +121,6 @@
121 #size-cells = <1>; 121 #size-cells = <1>;
122 ranges = <0x00000000 0x80000000 0x40000000>; 122 ranges = <0x00000000 0x80000000 0x40000000>;
123 device_type = "memory"; 123 device_type = "memory";
124 reg = <0x00000000 0x20000000>; /* 512MiB */ 124 reg = <0x80000000 0x20000000>; /* 512MiB */
125 }; 125 };
126}; 126};
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index 911f069e0540..b0eb0e7fe21d 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -11,8 +11,16 @@
11 11
12/ { 12/ {
13 compatible = "snps,nsim_hs"; 13 compatible = "snps,nsim_hs";
14 #address-cells = <2>;
15 #size-cells = <2>;
14 interrupt-parent = <&core_intc>; 16 interrupt-parent = <&core_intc>;
15 17
18 memory {
19 device_type = "memory";
20 reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */
21 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
22 };
23
16 chosen { 24 chosen {
17 bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8"; 25 bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
18 }; 26 };
@@ -26,8 +34,8 @@
26 #address-cells = <1>; 34 #address-cells = <1>;
27 #size-cells = <1>; 35 #size-cells = <1>;
28 36
29 /* child and parent address space 1:1 mapped */ 37 /* only perip space at end of low mem accessible */
30 ranges; 38 ranges = <0x80000000 0x0 0x80000000 0x80000000>;
31 39
32 core_intc: core-interrupt-controller { 40 core_intc: core-interrupt-controller {
33 compatible = "snps,archs-intc"; 41 compatible = "snps,archs-intc";
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index a870bdd5e404..296d371a335c 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -32,6 +32,6 @@
32 32
33 memory { 33 memory {
34 device_type = "memory"; 34 device_type = "memory";
35 reg = <0x00000000 0x10000000>; /* 256M */ 35 reg = <0x80000000 0x10000000>; /* 256M */
36 }; 36 };
37}; 37};
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index 9393fd902f0d..84226bd48baf 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -56,6 +56,6 @@
56 #size-cells = <1>; 56 #size-cells = <1>;
57 ranges = <0x00000000 0x80000000 0x40000000>; 57 ranges = <0x00000000 0x80000000 0x40000000>;
58 device_type = "memory"; 58 device_type = "memory";
59 reg = <0x00000000 0x20000000>; /* 512MiB */ 59 reg = <0x80000000 0x20000000>; /* 512MiB */
60 }; 60 };
61}; 61};
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index 9bee8ed09eb0..31f0fb5fc91d 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -71,6 +71,6 @@
71 #size-cells = <1>; 71 #size-cells = <1>;
72 ranges = <0x00000000 0x80000000 0x40000000>; 72 ranges = <0x00000000 0x80000000 0x40000000>;
73 device_type = "memory"; 73 device_type = "memory";
74 reg = <0x00000000 0x20000000>; /* 512MiB */ 74 reg = <0x80000000 0x20000000>; /* 512MiB */
75 }; 75 };
76}; 76};
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 562dac6a7f78..c92c0ef1e9d2 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -89,7 +89,6 @@ CONFIG_MMC=y
89CONFIG_MMC_SDHCI=y 89CONFIG_MMC_SDHCI=y
90CONFIG_MMC_SDHCI_PLTFM=y 90CONFIG_MMC_SDHCI_PLTFM=y
91CONFIG_MMC_DW=y 91CONFIG_MMC_DW=y
92CONFIG_MMC_DW_IDMAC=y
93# CONFIG_IOMMU_SUPPORT is not set 92# CONFIG_IOMMU_SUPPORT is not set
94CONFIG_EXT3_FS=y 93CONFIG_EXT3_FS=y
95CONFIG_EXT4_FS=y 94CONFIG_EXT4_FS=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 83a6d8d5cc58..cfac24e0e7b6 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -95,7 +95,6 @@ CONFIG_MMC=y
95CONFIG_MMC_SDHCI=y 95CONFIG_MMC_SDHCI=y
96CONFIG_MMC_SDHCI_PLTFM=y 96CONFIG_MMC_SDHCI_PLTFM=y
97CONFIG_MMC_DW=y 97CONFIG_MMC_DW=y
98CONFIG_MMC_DW_IDMAC=y
99# CONFIG_IOMMU_SUPPORT is not set 98# CONFIG_IOMMU_SUPPORT is not set
100CONFIG_EXT3_FS=y 99CONFIG_EXT3_FS=y
101CONFIG_EXT4_FS=y 100CONFIG_EXT4_FS=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index f1e1c84e0dda..9922a118a15a 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -96,7 +96,6 @@ CONFIG_MMC=y
96CONFIG_MMC_SDHCI=y 96CONFIG_MMC_SDHCI=y
97CONFIG_MMC_SDHCI_PLTFM=y 97CONFIG_MMC_SDHCI_PLTFM=y
98CONFIG_MMC_DW=y 98CONFIG_MMC_DW=y
99CONFIG_MMC_DW_IDMAC=y
100# CONFIG_IOMMU_SUPPORT is not set 99# CONFIG_IOMMU_SUPPORT is not set
101CONFIG_EXT3_FS=y 100CONFIG_EXT3_FS=y
102CONFIG_EXT4_FS=y 101CONFIG_EXT4_FS=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index d8023bc8d1ad..7fac7d85ed6a 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -120,7 +120,7 @@
120 120
121/* gcc builtin sr needs reg param to be long immediate */ 121/* gcc builtin sr needs reg param to be long immediate */
122#define write_aux_reg(reg_immed, val) \ 122#define write_aux_reg(reg_immed, val) \
123 __builtin_arc_sr((unsigned int)val, reg_immed) 123 __builtin_arc_sr((unsigned int)(val), reg_immed)
124 124
125#else 125#else
126 126
@@ -327,8 +327,8 @@ struct bcr_generic {
327 */ 327 */
328 328
329struct cpuinfo_arc_mmu { 329struct cpuinfo_arc_mmu {
330 unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, u_dtlb:6, u_itlb:6; 330 unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
331 unsigned int num_tlb:16, sets:12, ways:4; 331 unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
332}; 332};
333 333
334struct cpuinfo_arc_cache { 334struct cpuinfo_arc_cache {
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index c3ecda023e3a..7730d302cadb 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,11 +17,11 @@
17#include <asm/barrier.h> 17#include <asm/barrier.h>
18#include <asm/smp.h> 18#include <asm/smp.h>
19 19
20#define atomic_read(v) ((v)->counter) 20#define atomic_read(v) READ_ONCE((v)->counter)
21 21
22#ifdef CONFIG_ARC_HAS_LLSC 22#ifdef CONFIG_ARC_HAS_LLSC
23 23
24#define atomic_set(v, i) (((v)->counter) = (i)) 24#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
25 25
26#ifdef CONFIG_ARC_STAR_9000923308 26#ifdef CONFIG_ARC_STAR_9000923308
27 27
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
107#ifndef CONFIG_SMP 107#ifndef CONFIG_SMP
108 108
109 /* violating atomic_xxx API locking protocol in UP for optimization sake */ 109 /* violating atomic_xxx API locking protocol in UP for optimization sake */
110#define atomic_set(v, i) (((v)->counter) = (i)) 110#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
111 111
112#else 112#else
113 113
@@ -125,7 +125,7 @@ static inline void atomic_set(atomic_t *v, int i)
125 unsigned long flags; 125 unsigned long flags;
126 126
127 atomic_ops_lock(flags); 127 atomic_ops_lock(flags);
128 v->counter = i; 128 WRITE_ONCE(v->counter, i);
129 atomic_ops_unlock(flags); 129 atomic_ops_unlock(flags);
130} 130}
131 131
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index e23ea6e7633a..abf06e81c929 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -65,6 +65,7 @@ extern int ioc_exists;
65#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4) 65#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
66#define ARC_REG_IC_PTAG 0x1E 66#define ARC_REG_IC_PTAG 0x1E
67#endif 67#endif
68#define ARC_REG_IC_PTAG_HI 0x1F
68 69
69/* Bit val in IC_CTRL */ 70/* Bit val in IC_CTRL */
70#define IC_CTRL_CACHE_DISABLE 0x1 71#define IC_CTRL_CACHE_DISABLE 0x1
@@ -77,6 +78,7 @@ extern int ioc_exists;
77#define ARC_REG_DC_FLSH 0x4B 78#define ARC_REG_DC_FLSH 0x4B
78#define ARC_REG_DC_FLDL 0x4C 79#define ARC_REG_DC_FLDL 0x4C
79#define ARC_REG_DC_PTAG 0x5C 80#define ARC_REG_DC_PTAG 0x5C
81#define ARC_REG_DC_PTAG_HI 0x5F
80 82
81/* Bit val in DC_CTRL */ 83/* Bit val in DC_CTRL */
82#define DC_CTRL_INV_MODE_FLUSH 0x40 84#define DC_CTRL_INV_MODE_FLUSH 0x40
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 0992d3dbcc65..fbe3587c4f36 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -31,10 +31,10 @@
31 31
32void flush_cache_all(void); 32void flush_cache_all(void);
33 33
34void flush_icache_range(unsigned long start, unsigned long end); 34void flush_icache_range(unsigned long kstart, unsigned long kend);
35void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len); 35void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
36void __inv_icache_page(unsigned long paddr, unsigned long vaddr); 36void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
37void __flush_dcache_page(unsigned long paddr, unsigned long vaddr); 37void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
38 38
39#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 39#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
40 40
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 415443c2a8c4..1aff3be91075 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -110,13 +110,12 @@
110 110
111.macro FAKE_RET_FROM_EXCPN 111.macro FAKE_RET_FROM_EXCPN
112 112
113 ld r9, [sp, PT_status32] 113 lr r9, [status32]
114 bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK) 114 bclr r9, r9, STATUS_AE_BIT
115 bset r9, r9, STATUS_L_BIT 115 or r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)
116 sr r9, [erstatus] 116 sr r9, [erstatus]
117 mov r9, 55f 117 mov r9, 55f
118 sr r9, [eret] 118 sr r9, [eret]
119
120 rtie 119 rtie
12155: 12055:
122.endm 121.endm
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
new file mode 100644
index 000000000000..b1585c96324a
--- /dev/null
+++ b/arch/arc/include/asm/highmem.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#ifndef _ASM_HIGHMEM_H
11#define _ASM_HIGHMEM_H
12
13#ifdef CONFIG_HIGHMEM
14
15#include <uapi/asm/page.h>
16#include <asm/kmap_types.h>
17
18/* start after vmalloc area */
19#define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
20#define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */
21#define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
22#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
23
24/* start after fixmap area */
25#define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
26#define PKMAP_SIZE PGDIR_SIZE
27#define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
28#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
29#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
30#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
31
32#define kmap_prot PAGE_KERNEL
33
34
35#include <asm/cacheflush.h>
36
37extern void *kmap(struct page *page);
38extern void *kmap_high(struct page *page);
39extern void *kmap_atomic(struct page *page);
40extern void __kunmap_atomic(void *kvaddr);
41extern void kunmap_high(struct page *page);
42
43extern void kmap_init(void);
44
45static inline void flush_cache_kmaps(void)
46{
47 flush_cache_all();
48}
49
50static inline void kunmap(struct page *page)
51{
52 BUG_ON(in_interrupt());
53 if (!PageHighMem(page))
54 return;
55 kunmap_high(page);
56}
57
58
59#endif
60
61#endif
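
The fixmap and pkmap windows defined above are carved out of the top of the kernel address space, one PGDIR each. A small stand-alone C sketch of the same arithmetic, assuming the 8 KB page / 2 MB PGDIR configuration used elsewhere in this series (values illustrative, not taken from a running kernel):

#include <stdio.h>

#define PAGE_SHIFT   13                      /* 8 KB pages (assumed) */
#define PAGE_OFFSET  0x80000000UL
#define PGDIR_SIZE   (1UL << 21)             /* 2 MB: 8 bits of PTE + 13 bits of page */
#define FIXMAP_SIZE  PGDIR_SIZE
#define PKMAP_SIZE   PGDIR_SIZE
#define FIXMAP_BASE  (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
#define PKMAP_BASE   (FIXMAP_BASE + FIXMAP_SIZE)
#define LAST_PKMAP   (PKMAP_SIZE >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))
#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)

int main(void)
{
    printf("FIXMAP: %#lx..%#lx\n", FIXMAP_BASE, FIXMAP_BASE + FIXMAP_SIZE);
    printf("PKMAP : %#lx..%#lx (%lu slots)\n",
           PKMAP_BASE, PKMAP_BASE + PKMAP_SIZE, LAST_PKMAP);
    /* round-trip check: slot -> vaddr -> slot */
    printf("slot 5 -> %#lx -> slot %lu\n", PKMAP_ADDR(5), PKMAP_NR(PKMAP_ADDR(5)));
    return 0;
}

With these assumptions the pkmap window holds 256 slots and sits directly below PAGE_OFFSET, with the fixmap window just under it.
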
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
new file mode 100644
index 000000000000..c5094de86403
--- /dev/null
+++ b/arch/arc/include/asm/hugepage.h
@@ -0,0 +1,81 @@
1/*
2 * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9
10#ifndef _ASM_ARC_HUGEPAGE_H
11#define _ASM_ARC_HUGEPAGE_H
12
13#include <linux/types.h>
14#include <asm-generic/pgtable-nopmd.h>
15
16static inline pte_t pmd_pte(pmd_t pmd)
17{
18 return __pte(pmd_val(pmd));
19}
20
21static inline pmd_t pte_pmd(pte_t pte)
22{
23 return __pmd(pte_val(pte));
24}
25
26#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
27#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
28#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
29#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
30#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
31#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
32#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
33#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
34#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
35
36#define pmd_write(pmd) pte_write(pmd_pte(pmd))
37#define pmd_young(pmd) pte_young(pmd_pte(pmd))
38#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
39#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
40#define pmd_special(pmd) pte_special(pmd_pte(pmd))
41
42#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
43
44#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
45#define pmd_trans_splitting(pmd) (pmd_trans_huge(pmd) && pmd_special(pmd))
46
47#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
48
49static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
50{
51 /*
52 * open-coded pte_modify() with additional retaining of HW_SZ bit
53 * so that pmd_trans_huge() remains true for this PMD
54 */
55 return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
56}
57
58static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
59 pmd_t *pmdp, pmd_t pmd)
60{
61 *pmdp = pmd;
62}
63
64extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
65 pmd_t *pmd);
66
67#define has_transparent_hugepage() 1
68
69/* Generic variants assume pgtable_t is struct page *, hence need for these */
70#define __HAVE_ARCH_PGTABLE_DEPOSIT
71extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
72 pgtable_t pgtable);
73
74#define __HAVE_ARCH_PGTABLE_WITHDRAW
75extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
76
77#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
78extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
79 unsigned long end);
80
81#endif
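
pmd_modify() above keeps _PAGE_HW_SZ while swapping in the new protection bits, so a transparent huge mapping is still reported as huge by pmd_trans_huge() after a protection change. A stand-alone sketch of that masking, using the MMUv4 bit positions that appear in pgtable.h later in this patch (illustrative values only):

#include <stdio.h>

typedef unsigned long pmdval_t;

#define _PAGE_ACCESSED (1UL << 4)
#define _PAGE_DIRTY    (1UL << 5)
#define _PAGE_HW_SZ    (1UL << 10)           /* "super page" size bit (MMUv4) */
#define PAGE_MASK      (~((1UL << 13) - 1))  /* 8 KB pages assumed */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/* open-coded pte_modify() that additionally retains the HW_SZ bit */
static pmdval_t pmd_modify(pmdval_t pmd, pmdval_t newprot)
{
    return (pmd & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | newprot;
}

int main(void)
{
    pmdval_t pmd = 0x1fa000UL | _PAGE_HW_SZ | _PAGE_DIRTY | 0x3; /* old prot bits 0x3 */
    pmdval_t ro  = pmd_modify(pmd, 0x1);                         /* new prot bits 0x1 */

    printf("huge before: %d, after: %d\n",
           !!(pmd & _PAGE_HW_SZ), !!(ro & _PAGE_HW_SZ));
    return 0;
}
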
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index bc5103637326..4fd7d62a6e30 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,6 +16,7 @@
16#ifdef CONFIG_ISA_ARCOMPACT 16#ifdef CONFIG_ISA_ARCOMPACT
17#define TIMER0_IRQ 3 17#define TIMER0_IRQ 3
18#define TIMER1_IRQ 4 18#define TIMER1_IRQ 4
19#define IPI_IRQ (NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */
19#else 20#else
20#define TIMER0_IRQ 16 21#define TIMER0_IRQ 16
21#define TIMER1_IRQ 17 22#define TIMER1_IRQ 17
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index aa805575c320..d8c608174617 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -23,11 +23,13 @@
23#define STATUS_E2_BIT 2 /* Int 2 enable */ 23#define STATUS_E2_BIT 2 /* Int 2 enable */
24#define STATUS_A1_BIT 3 /* Int 1 active */ 24#define STATUS_A1_BIT 3 /* Int 1 active */
25#define STATUS_A2_BIT 4 /* Int 2 active */ 25#define STATUS_A2_BIT 4 /* Int 2 active */
26#define STATUS_AE_BIT 5 /* Exception active */
26 27
27#define STATUS_E1_MASK (1<<STATUS_E1_BIT) 28#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
28#define STATUS_E2_MASK (1<<STATUS_E2_BIT) 29#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
29#define STATUS_A1_MASK (1<<STATUS_A1_BIT) 30#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
30#define STATUS_A2_MASK (1<<STATUS_A2_BIT) 31#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
32#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
31#define STATUS_IE_MASK (STATUS_E1_MASK | STATUS_E2_MASK) 33#define STATUS_IE_MASK (STATUS_E1_MASK | STATUS_E2_MASK)
32 34
33/* Other Interrupt Handling related Aux regs */ 35/* Other Interrupt Handling related Aux regs */
@@ -91,7 +93,19 @@ static inline void arch_local_irq_restore(unsigned long flags)
91/* 93/*
92 * Unconditionally Enable IRQs 94 * Unconditionally Enable IRQs
93 */ 95 */
94extern void arch_local_irq_enable(void); 96static inline void arch_local_irq_enable(void)
97{
98 unsigned long temp;
99
100 __asm__ __volatile__(
101 " lr %0, [status32] \n"
102 " or %0, %0, %1 \n"
103 " flag %0 \n"
104 : "=&r"(temp)
105 : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
106 : "cc", "memory");
107}
108
95 109
96/* 110/*
97 * Unconditionally Disable IRQs 111 * Unconditionally Disable IRQs
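
The new inline arch_local_irq_enable() is simply "read STATUS32, OR in E1|E2, write it back with FLAG". The same computation in plain C, using the STATUS32 bit definitions added above (a sketch of the bit math only; the real code has to go through the lr/flag instructions):

#include <stdio.h>

#define STATUS_E1_BIT  1
#define STATUS_E2_BIT  2
#define STATUS_E1_MASK (1U << STATUS_E1_BIT)
#define STATUS_E2_MASK (1U << STATUS_E2_BIT)

/* what "lr %0,[status32]; or %0,%0,(E1|E2); flag %0" computes */
static unsigned int irq_enable_both(unsigned int status32)
{
    return status32 | STATUS_E1_MASK | STATUS_E2_MASK;
}

int main(void)
{
    unsigned int s = 0x80000000U;   /* hypothetical STATUS32 with both levels masked */
    printf("before %#x after %#x\n", s, irq_enable_both(s));
    return 0;
}
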
diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h
new file mode 100644
index 000000000000..f0d7f6acea4e
--- /dev/null
+++ b/arch/arc/include/asm/kmap_types.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#ifndef _ASM_KMAP_TYPES_H
11#define _ASM_KMAP_TYPES_H
12
13/*
14 * We primarily need to define KM_TYPE_NR here but that in turn
15 * is a function of PGDIR_SIZE etc.
16 * To avoid circular deps issue, put everything in asm/highmem.h
17 */
18#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index e8993a2be6c2..6ff657a904b6 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -23,11 +23,8 @@
23 * @dt_compat: Array of device tree 'compatible' strings 23 * @dt_compat: Array of device tree 'compatible' strings
24 * (XXX: although only 1st entry is looked at) 24 * (XXX: although only 1st entry is looked at)
25 * @init_early: Very early callback [called from setup_arch()] 25 * @init_early: Very early callback [called from setup_arch()]
26 * @init_irq: setup external IRQ controllers [called from init_IRQ()] 26 * @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP)
27 * @init_smp: for each CPU (e.g. setup IPI)
28 * [(M):init_IRQ(), (o):start_kernel_secondary()] 27 * [(M):init_IRQ(), (o):start_kernel_secondary()]
29 * @init_time: platform specific clocksource/clockevent registration
30 * [called from time_init()]
31 * @init_machine: arch initcall level callback (e.g. populate static 28 * @init_machine: arch initcall level callback (e.g. populate static
32 * platform devices or parse Devicetree) 29 * platform devices or parse Devicetree)
33 * @init_late: Late initcall level callback 30 * @init_late: Late initcall level callback
@@ -36,13 +33,10 @@
36struct machine_desc { 33struct machine_desc {
37 const char *name; 34 const char *name;
38 const char **dt_compat; 35 const char **dt_compat;
39
40 void (*init_early)(void); 36 void (*init_early)(void);
41 void (*init_irq)(void);
42#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
43 void (*init_smp)(unsigned int); 38 void (*init_cpu_smp)(unsigned int);
44#endif 39#endif
45 void (*init_time)(void);
46 void (*init_machine)(void); 40 void (*init_machine)(void);
47 void (*init_late)(void); 41 void (*init_late)(void);
48 42
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 52c11f0bb0e5..46f4e5351b2a 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -86,9 +86,6 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
86 __mcip_cmd(cmd, param); 86 __mcip_cmd(cmd, param);
87} 87}
88 88
89extern void mcip_init_early_smp(void);
90extern void mcip_init_smp(unsigned int cpu);
91
92#endif 89#endif
93 90
94#endif 91#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 0f9c3eb5327e..b144d7ca7d20 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -24,6 +24,7 @@
24#if (CONFIG_ARC_MMU_VER < 4) 24#if (CONFIG_ARC_MMU_VER < 4)
25#define ARC_REG_TLBPD0 0x405 25#define ARC_REG_TLBPD0 0x405
26#define ARC_REG_TLBPD1 0x406 26#define ARC_REG_TLBPD1 0x406
27#define ARC_REG_TLBPD1HI 0 /* Dummy: allows code sharing with ARC700 */
27#define ARC_REG_TLBINDEX 0x407 28#define ARC_REG_TLBINDEX 0x407
28#define ARC_REG_TLBCOMMAND 0x408 29#define ARC_REG_TLBCOMMAND 0x408
29#define ARC_REG_PID 0x409 30#define ARC_REG_PID 0x409
@@ -31,6 +32,7 @@
31#else 32#else
32#define ARC_REG_TLBPD0 0x460 33#define ARC_REG_TLBPD0 0x460
33#define ARC_REG_TLBPD1 0x461 34#define ARC_REG_TLBPD1 0x461
35#define ARC_REG_TLBPD1HI 0x463
34#define ARC_REG_TLBINDEX 0x464 36#define ARC_REG_TLBINDEX 0x464
35#define ARC_REG_TLBCOMMAND 0x465 37#define ARC_REG_TLBCOMMAND 0x465
36#define ARC_REG_PID 0x468 38#define ARC_REG_PID 0x468
@@ -83,6 +85,11 @@ void arc_mmu_init(void);
83extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); 85extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
84void read_decode_mmu_bcr(void); 86void read_decode_mmu_bcr(void);
85 87
88static inline int is_pae40_enabled(void)
89{
90 return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
91}
92
86#endif /* !__ASSEMBLY__ */ 93#endif /* !__ASSEMBLY__ */
87 94
88#endif 95#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 9c8aa41e45c2..429957f1c236 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -43,7 +43,6 @@ typedef struct {
43typedef struct { 43typedef struct {
44 unsigned long pgprot; 44 unsigned long pgprot;
45} pgprot_t; 45} pgprot_t;
46typedef unsigned long pgtable_t;
47 46
48#define pte_val(x) ((x).pte) 47#define pte_val(x) ((x).pte)
49#define pgd_val(x) ((x).pgd) 48#define pgd_val(x) ((x).pgd)
@@ -57,20 +56,26 @@ typedef unsigned long pgtable_t;
57 56
58#else /* !STRICT_MM_TYPECHECKS */ 57#else /* !STRICT_MM_TYPECHECKS */
59 58
59#ifdef CONFIG_ARC_HAS_PAE40
60typedef unsigned long long pte_t;
61#else
60typedef unsigned long pte_t; 62typedef unsigned long pte_t;
63#endif
61typedef unsigned long pgd_t; 64typedef unsigned long pgd_t;
62typedef unsigned long pgprot_t; 65typedef unsigned long pgprot_t;
63typedef unsigned long pgtable_t;
64 66
65#define pte_val(x) (x) 67#define pte_val(x) (x)
66#define pgd_val(x) (x) 68#define pgd_val(x) (x)
67#define pgprot_val(x) (x) 69#define pgprot_val(x) (x)
68#define __pte(x) (x) 70#define __pte(x) (x)
71#define __pgd(x) (x)
69#define __pgprot(x) (x) 72#define __pgprot(x) (x)
70#define pte_pgprot(x) (x) 73#define pte_pgprot(x) (x)
71 74
72#endif 75#endif
73 76
77typedef pte_t * pgtable_t;
78
74#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT) 79#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
75 80
76#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr) 81#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 81208bfd9dcb..86ed671286df 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -49,7 +49,7 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
49 49
50static inline int __get_order_pgd(void) 50static inline int __get_order_pgd(void)
51{ 51{
52 return get_order(PTRS_PER_PGD * 4); 52 return get_order(PTRS_PER_PGD * sizeof(pgd_t));
53} 53}
54 54
55static inline pgd_t *pgd_alloc(struct mm_struct *mm) 55static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -87,7 +87,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
87 87
88static inline int __get_order_pte(void) 88static inline int __get_order_pte(void)
89{ 89{
90 return get_order(PTRS_PER_PTE * 4); 90 return get_order(PTRS_PER_PTE * sizeof(pte_t));
91} 91}
92 92
93static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 93static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,10 +107,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
107 pgtable_t pte_pg; 107 pgtable_t pte_pg;
108 struct page *page; 108 struct page *page;
109 109
110 pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte()); 110 pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
111 if (!pte_pg) 111 if (!pte_pg)
112 return 0; 112 return 0;
113 memzero((void *)pte_pg, PTRS_PER_PTE * 4); 113 memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
114 page = virt_to_page(pte_pg); 114 page = virt_to_page(pte_pg);
115 if (!pgtable_page_ctor(page)) { 115 if (!pgtable_page_ctor(page)) {
116 __free_page(page); 116 __free_page(page);
@@ -128,12 +128,12 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
128static inline void pte_free(struct mm_struct *mm, pgtable_t ptep) 128static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
129{ 129{
130 pgtable_page_dtor(virt_to_page(ptep)); 130 pgtable_page_dtor(virt_to_page(ptep));
131 free_pages(ptep, __get_order_pte()); 131 free_pages((unsigned long)ptep, __get_order_pte());
132} 132}
133 133
134#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) 134#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
135 135
136#define check_pgt_cache() do { } while (0) 136#define check_pgt_cache() do { } while (0)
137#define pmd_pgtable(pmd) pmd_page_vaddr(pmd) 137#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
138 138
139#endif /* _ASM_ARC_PGALLOC_H */ 139#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 1281718802f7..57af2f05ae84 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -38,6 +38,7 @@
38#include <asm/page.h> 38#include <asm/page.h>
39#include <asm/mmu.h> 39#include <asm/mmu.h>
40#include <asm-generic/pgtable-nopmd.h> 40#include <asm-generic/pgtable-nopmd.h>
41#include <linux/const.h>
41 42
42/************************************************************************** 43/**************************************************************************
43 * Page Table Flags 44 * Page Table Flags
@@ -60,7 +61,8 @@
60#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */ 61#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
61#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */ 62#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
62#define _PAGE_READ (1<<5) /* Page has user read perm (H) */ 63#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
63#define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */ 64#define _PAGE_DIRTY (1<<6) /* Page modified (dirty) (S) */
65#define _PAGE_SPECIAL (1<<7)
64#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ 66#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
65#define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */ 67#define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */
66 68
@@ -71,7 +73,8 @@
71#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ 73#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
72#define _PAGE_READ (1<<3) /* Page has user read perm (H) */ 74#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
73#define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */ 75#define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */
74#define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */ 76#define _PAGE_DIRTY (1<<5) /* Page modified (dirty) (S) */
77#define _PAGE_SPECIAL (1<<6)
75 78
76#if (CONFIG_ARC_MMU_VER >= 4) 79#if (CONFIG_ARC_MMU_VER >= 4)
77#define _PAGE_WTHRU (1<<7) /* Page cache mode write-thru (H) */ 80#define _PAGE_WTHRU (1<<7) /* Page cache mode write-thru (H) */
@@ -81,32 +84,33 @@
81#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */ 84#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
82 85
83#if (CONFIG_ARC_MMU_VER >= 4) 86#if (CONFIG_ARC_MMU_VER >= 4)
84#define _PAGE_SZ (1<<10) /* Page Size indicator (H) */ 87#define _PAGE_HW_SZ (1<<10) /* Page Size indicator (H): 0 normal, 1 super */
85#endif 88#endif
86 89
87#define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr 90#define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr
88 usable for shared TLB entries (H) */ 91 usable for shared TLB entries (H) */
92
93#define _PAGE_UNUSED_BIT (1<<12)
89#endif 94#endif
90 95
91/* vmalloc permissions */ 96/* vmalloc permissions */
92#define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \ 97#define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
93 _PAGE_GLOBAL | _PAGE_PRESENT) 98 _PAGE_GLOBAL | _PAGE_PRESENT)
94 99
95#ifdef CONFIG_ARC_CACHE_PAGES 100#ifndef CONFIG_ARC_CACHE_PAGES
96#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE 101#undef _PAGE_CACHEABLE
97#else 102#define _PAGE_CACHEABLE 0
98#define _PAGE_DEF_CACHEABLE (0)
99#endif 103#endif
100 104
101/* Helper for every "user" page 105#ifndef _PAGE_HW_SZ
102 * -kernel can R/W/X 106#define _PAGE_HW_SZ 0
103 * -by default cached, unless config otherwise 107#endif
104 * -present in memory 108
105 */ 109/* Defaults for every user page */
106#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE) 110#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
107 111
108/* Set of bits not changed in pte_modify */ 112/* Set of bits not changed in pte_modify */
109#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED) 113#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
110 114
111/* More Abbreviated helpers */ 115/* More Abbreviated helpers */
112#define PAGE_U_NONE __pgprot(___DEF) 116#define PAGE_U_NONE __pgprot(___DEF)
@@ -122,15 +126,20 @@
122 * user vaddr space - visible in all addr spaces, but kernel mode only 126 * user vaddr space - visible in all addr spaces, but kernel mode only
123 * Thus Global, all-kernel-access, no-user-access, cached 127 * Thus Global, all-kernel-access, no-user-access, cached
124 */ 128 */
125#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) 129#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)
126 130
127/* ioremap */ 131/* ioremap */
128#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) 132#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
129 133
130/* Masks for actual TLB "PD"s */ 134/* Masks for actual TLB "PD"s */
131#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) 135#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
132#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ) 136#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
137
138#ifdef CONFIG_ARC_HAS_PAE40
139#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
140#else
133#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE) 141#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
142#endif
134 143
135/************************************************************************** 144/**************************************************************************
136 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) 145 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -191,26 +200,22 @@
191 200
192/* Optimal Sizing of Pg Tbl - based on MMU page size */ 201/* Optimal Sizing of Pg Tbl - based on MMU page size */
193#if defined(CONFIG_ARC_PAGE_SIZE_8K) 202#if defined(CONFIG_ARC_PAGE_SIZE_8K)
194#define BITS_FOR_PTE 8 203#define BITS_FOR_PTE 8 /* 11:8:13 */
195#elif defined(CONFIG_ARC_PAGE_SIZE_16K) 204#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
196#define BITS_FOR_PTE 8 205#define BITS_FOR_PTE 8 /* 10:8:14 */
197#elif defined(CONFIG_ARC_PAGE_SIZE_4K) 206#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
198#define BITS_FOR_PTE 9 207#define BITS_FOR_PTE 9 /* 11:9:12 */
199#endif 208#endif
200 209
201#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE) 210#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
202 211
203#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE) 212#define PGDIR_SHIFT (32 - BITS_FOR_PGD)
204#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PGD sz */ 213#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PGD sz */
205#define PGDIR_MASK (~(PGDIR_SIZE-1)) 214#define PGDIR_MASK (~(PGDIR_SIZE-1))
206 215
207#ifdef __ASSEMBLY__ 216#define PTRS_PER_PTE _BITUL(BITS_FOR_PTE)
208#define PTRS_PER_PTE (1 << BITS_FOR_PTE) 217#define PTRS_PER_PGD _BITUL(BITS_FOR_PGD)
209#define PTRS_PER_PGD (1 << BITS_FOR_PGD) 218
210#else
211#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
212#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
213#endif
214/* 219/*
215 * Number of entries a userland program uses. 220 * Number of entries a userland program uses.
216 * TASK_SIZE is the maximum vaddr that can be used by a userland program. 221 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
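
The 11:8:13 / 10:8:14 / 11:9:12 annotations added above describe how a 32-bit virtual address splits into PGD index, PTE index and page offset. A quick stand-alone check of the 8 KB page case (configuration assumed, mirroring the macros above):

#include <stdio.h>

#define PAGE_SHIFT    13              /* CONFIG_ARC_PAGE_SIZE_8K assumed */
#define BITS_IN_PAGE  PAGE_SHIFT
#define BITS_FOR_PTE  8
#define BITS_FOR_PGD  (32 - BITS_FOR_PTE - BITS_IN_PAGE)
#define PGDIR_SHIFT   (32 - BITS_FOR_PGD)
#define PGDIR_SIZE    (1UL << PGDIR_SHIFT)
#define PTRS_PER_PTE  (1UL << BITS_FOR_PTE)
#define PTRS_PER_PGD  (1UL << BITS_FOR_PGD)

int main(void)
{
    printf("split %d:%d:%d, PGDIR_SIZE=%lu MB, PGD entries=%lu, PTE entries=%lu\n",
           BITS_FOR_PGD, BITS_FOR_PTE, BITS_IN_PAGE,
           PGDIR_SIZE >> 20, PTRS_PER_PGD, PTRS_PER_PTE);
    return 0;
}

This prints the 11:8:13 split, a 2 MB PGDIR span, 2048 PGD entries and 256 PTEs per page table.
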
@@ -270,15 +275,10 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
270 (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \ 275 (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
271 PAGE_SHIFT))) 276 PAGE_SHIFT)))
272 277
273#define mk_pte(page, pgprot) \ 278#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
274({ \
275 pte_t pte; \
276 pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
277 pte; \
278})
279
280#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 279#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
281#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) 280#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
281 pgprot_val(prot)))
282#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 282#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
283 283
284/* 284/*
@@ -295,23 +295,26 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
295/* Zoo of pte_xxx function */ 295/* Zoo of pte_xxx function */
296#define pte_read(pte) (pte_val(pte) & _PAGE_READ) 296#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
297#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) 297#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
298#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED) 298#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
299#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) 299#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
300#define pte_special(pte) (0) 300#define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL)
301 301
302#define PTE_BIT_FUNC(fn, op) \ 302#define PTE_BIT_FUNC(fn, op) \
303 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } 303 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
304 304
305PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
305PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)); 306PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
306PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE)); 307PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
307PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED)); 308PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
308PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED)); 309PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
309PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED)); 310PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
310PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED)); 311PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
311PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE)); 312PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
312PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE)); 313PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
314PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
315PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
313 316
314static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 317#define __HAVE_ARCH_PTE_SPECIAL
315 318
316static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 319static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
317{ 320{
@@ -357,7 +360,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
357#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr) 360#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
358#endif 361#endif
359 362
360extern void paging_init(void);
361extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE); 363extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
362void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 364void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
363 pte_t *ptep); 365 pte_t *ptep);
@@ -383,6 +385,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
383 * remap a physical page `pfn' of size `size' with page protection `prot' 385 * remap a physical page `pfn' of size `size' with page protection `prot'
384 * into virtual address `from' 386 * into virtual address `from'
385 */ 387 */
388#ifdef CONFIG_TRANSPARENT_HUGEPAGE
389#include <asm/hugepage.h>
390#endif
391
386#include <asm-generic/pgtable.h> 392#include <asm-generic/pgtable.h>
387 393
388/* to cope with aliasing VIPT cache */ 394/* to cope with aliasing VIPT cache */
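
The pfn_pte() change earlier in this file matters once pte_t becomes 64-bit under PAE40: the pfn has to be widened before the shift, otherwise physical addresses beyond 4 GB are silently truncated. A stand-alone illustration with a made-up pfn (PAGE_SHIFT of 13 assumed):

#include <stdio.h>

#define PAGE_SHIFT 13
typedef unsigned long long pte_t;   /* 64-bit PTE as with PAE40 */

int main(void)
{
    unsigned int pfn = 0x1234567;   /* hypothetical pfn, phys 0x2468ace000 (> 4 GB) */

    pte_t truncated = (pte_t)(pfn << PAGE_SHIFT); /* shift done in 32 bits first */
    pte_t widened   = (pte_t)pfn << PAGE_SHIFT;   /* widen before shifting, as pfn_pte() now does */

    printf("truncated %#llx, widened %#llx\n", truncated, widened);
    return 0;
}
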
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index ee682d8e0213..44545354e9e8 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -114,7 +114,12 @@ extern unsigned int get_wchan(struct task_struct *p);
114 * ----------------------------------------------------------------------------- 114 * -----------------------------------------------------------------------------
115 */ 115 */
116#define VMALLOC_START 0x70000000 116#define VMALLOC_START 0x70000000
117#define VMALLOC_SIZE (PAGE_OFFSET - VMALLOC_START) 117
118/*
119 * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
120 * See asm/highmem.h for details
121 */
122#define VMALLOC_SIZE (PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
118#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) 123#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
119 124
120#define USER_KERNEL_GUTTER 0x10000000 125#define USER_KERNEL_GUTTER 0x10000000
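
The new VMALLOC_SIZE reserves four PGDIR-sized chunks below PAGE_OFFSET: one each for fixmap and pkmap plus a two-PGDIR gutter. A quick check that the numbers line up with the highmem layout earlier in this patch (2 MB PGDIR assumed, illustrative only):

#include <stdio.h>

#define PAGE_OFFSET    0x80000000UL
#define VMALLOC_START  0x70000000UL
#define PGDIR_SIZE     (1UL << 21)  /* 2 MB, 8 KB page config assumed */
#define VMALLOC_SIZE   (PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
#define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
#define FIXMAP_BASE    (PAGE_OFFSET - 2 * PGDIR_SIZE)  /* fixmap + pkmap on top */

int main(void)
{
    printf("vmalloc: %#lx..%#lx (%lu MB)\n",
           VMALLOC_START, VMALLOC_END, VMALLOC_SIZE >> 20);
    printf("gutter : %#lx..%#lx (%lu PGDIRs)\n",
           VMALLOC_END, FIXMAP_BASE, (FIXMAP_BASE - VMALLOC_END) / PGDIR_SIZE);
    return 0;
}
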
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 6e3ef5ba4f74..307846691be6 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -33,4 +33,11 @@ extern int root_mountflags, end_mem;
33void setup_processor(void); 33void setup_processor(void);
34void __init setup_arch_memory(void); 34void __init setup_arch_memory(void);
35 35
36/* Helpers used in arc_*_mumbojumbo routines */
37#define IS_AVAIL1(v, s) ((v) ? s : "")
38#define IS_DISABLED_RUN(v) ((v) ? "" : "(disabled) ")
39#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
40#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
41#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
42
36#endif /* __ASMARC_SETUP_H */ 43#endif /* __ASMARC_SETUP_H */
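
The IS_AVAIL*/IS_USED* helpers moved into setup.h are string-pasting conveniences for the cpuinfo printers; IS_AVAIL2() expands into two comma-separated arguments so it can feed a "%s%s" format. A stand-alone sketch of how they compose, with IS_ENABLED() and the config symbol faked locally since this runs outside the kernel:

#include <stdio.h>

#define IS_ENABLED(cfg)      (cfg)   /* stand-in for the kconfig helper */
#define IS_AVAIL1(v, s)      ((v) ? s : "")
#define IS_USED_RUN(v)       ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg)     IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))

#define CONFIG_ARC_HAS_LLSC 0   /* pretend the feature is compiled out */

int main(void)
{
    int hw_has_llsc = 1;        /* hypothetical: hardware advertises the feature */

    /* prints "llock/scond (not used) " - present in h/w, disabled in config */
    printf("ISA extensions: %s%s\n",
           IS_AVAIL2(hw_has_llsc, "llock/scond ", CONFIG_ARC_HAS_LLSC));
    return 0;
}
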
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 3845b9e94f69..133c867d15af 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -45,12 +45,19 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
45 * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP 45 * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
46 * 46 *
47 * @info: SoC SMP specific info for /proc/cpuinfo etc 47 * @info: SoC SMP specific info for /proc/cpuinfo etc
48 * @init_early_smp: An SMP specific h/w block can init itself
49 * Could be common across platforms so not covered by
50 * mach_desc->init_early()
51 * @init_irq_cpu: Called for each core so SMP h/w block driver can do
52 * any needed setup per cpu (e.g. IPI request)
48 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) 53 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
49 * @ipi_send: To send IPI to a @cpu 54 * @ipi_send: To send IPI to a @cpu
50 * @ipi_clear: To clear IPI received at @irq 55 * @ipi_clear: To clear IPI received at @irq
51 */ 56 */
52struct plat_smp_ops { 57struct plat_smp_ops {
53 const char *info; 58 const char *info;
59 void (*init_early_smp)(void);
60 void (*init_irq_cpu)(int cpu);
54 void (*cpu_kick)(int cpu, unsigned long pc); 61 void (*cpu_kick)(int cpu, unsigned long pc);
55 void (*ipi_send)(int cpu); 62 void (*ipi_send)(int cpu);
56 void (*ipi_clear)(int irq); 63 void (*ipi_clear)(int irq);
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index 71c7b2e4b874..1fe9c8c80280 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -17,6 +17,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
17void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); 17void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
18void local_flush_tlb_range(struct vm_area_struct *vma, 18void local_flush_tlb_range(struct vm_area_struct *vma,
19 unsigned long start, unsigned long end); 19 unsigned long start, unsigned long end);
20void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
21 unsigned long end);
20 22
21#ifndef CONFIG_SMP 23#ifndef CONFIG_SMP
22#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e) 24#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
@@ -24,6 +26,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
24#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e) 26#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
25#define flush_tlb_all() local_flush_tlb_all() 27#define flush_tlb_all() local_flush_tlb_all()
26#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) 28#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
29#define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e)
27#else 30#else
28extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 31extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
29 unsigned long end); 32 unsigned long end);
@@ -31,5 +34,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
31extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 34extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
32extern void flush_tlb_all(void); 35extern void flush_tlb_all(void);
33extern void flush_tlb_mm(struct mm_struct *mm); 36extern void flush_tlb_mm(struct mm_struct *mm);
37extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
38
34#endif /* CONFIG_SMP */ 39#endif /* CONFIG_SMP */
35#endif 40#endif
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 9d129a2a1351..059aff38f10a 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -9,6 +9,8 @@
9#ifndef _UAPI__ASM_ARC_PAGE_H 9#ifndef _UAPI__ASM_ARC_PAGE_H
10#define _UAPI__ASM_ARC_PAGE_H 10#define _UAPI__ASM_ARC_PAGE_H
11 11
12#include <linux/const.h>
13
12/* PAGE_SHIFT determines the page size */ 14/* PAGE_SHIFT determines the page size */
13#if defined(CONFIG_ARC_PAGE_SIZE_16K) 15#if defined(CONFIG_ARC_PAGE_SIZE_16K)
14#define PAGE_SHIFT 14 16#define PAGE_SHIFT 14
@@ -25,13 +27,8 @@
25#define PAGE_SHIFT 13 27#define PAGE_SHIFT 13
26#endif 28#endif
27 29
28#ifdef __ASSEMBLY__ 30#define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */
29#define PAGE_SIZE (1 << PAGE_SHIFT) 31#define PAGE_OFFSET _AC(0x80000000, UL) /* Kernel starts at 2G onwrds */
30#define PAGE_OFFSET (0x80000000)
31#else
32#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
33#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
34#endif
35 32
36#define PAGE_MASK (~(PAGE_SIZE-1)) 33#define PAGE_MASK (~(PAGE_SIZE-1))
37 34
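
Dropping the __ASSEMBLY__ special cases works because _BITUL()/_AC() (from include/uapi/linux/const.h) expand to a bare constant in assembly and a suffixed constant in C. A rough user-space approximation of the C-side expansion, just to show the resulting values (the real helpers go through one more level of indirection):

#include <stdio.h>

/* C-side behaviour of the uapi const helpers (the assembly side drops the suffix) */
#define _AC(X, Y)   (X##Y)
#define _BITUL(x)   (_AC(1, UL) << (x))

#define PAGE_SHIFT  13
#define PAGE_SIZE   _BITUL(PAGE_SHIFT)       /* 8 KB */
#define PAGE_OFFSET _AC(0x80000000, UL)

int main(void)
{
    printf("PAGE_SIZE=%lu PAGE_OFFSET=%#lx\n", PAGE_SIZE, PAGE_OFFSET);
    return 0;
}
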
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 8fa76567e402..445e63a10754 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -24,7 +24,7 @@
24 .align 4 24 .align 4
25 25
26# Initial 16 slots are Exception Vectors 26# Initial 16 slots are Exception Vectors
27VECTOR stext ; Restart Vector (jump to entry point) 27VECTOR res_service ; Reset Vector
28VECTOR mem_service ; Mem exception 28VECTOR mem_service ; Mem exception
29VECTOR instr_service ; Instrn Error 29VECTOR instr_service ; Instrn Error
30VECTOR EV_MachineCheck ; Fatal Machine check 30VECTOR EV_MachineCheck ; Fatal Machine check
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 15d457b4403a..59f52035b4ea 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -86,7 +86,7 @@
86 */ 86 */
87 87
88; ********* Critical System Events ********************** 88; ********* Critical System Events **********************
89VECTOR res_service ; 0x0, Restart Vector (0x0) 89VECTOR res_service ; 0x0, Reset Vector (0x0)
90VECTOR mem_service ; 0x8, Mem exception (0x1) 90VECTOR mem_service ; 0x8, Mem exception (0x1)
91VECTOR instr_service ; 0x10, Instrn Error (0x2) 91VECTOR instr_service ; 0x10, Instrn Error (0x2)
92 92
@@ -155,13 +155,9 @@ int2_saved_reg:
155; --------------------------------------------- 155; ---------------------------------------------
156 .section .text, "ax",@progbits 156 .section .text, "ax",@progbits
157 157
158res_service: ; processor restart
159 flag 0x1 ; not implemented
160 nop
161 nop
162 158
163reserved: ; processor restart 159reserved:
164 rtie ; jump to processor initializations 160 flag 1 ; Unexpected event, halt
165 161
166;##################### Interrupt Handling ############################## 162;##################### Interrupt Handling ##############################
167 163
@@ -175,12 +171,25 @@ ENTRY(handle_interrupt_level2)
175 171
176 ;------------------------------------------------------ 172 ;------------------------------------------------------
177 ; if L2 IRQ interrupted a L1 ISR, disable preemption 173 ; if L2 IRQ interrupted a L1 ISR, disable preemption
174 ;
175 ; This is to avoid a potential L1-L2-L1 scenario
176 ; -L1 IRQ taken
177 ; -L2 interrupts L1 (before L1 ISR could run)
178 ; -preemption off IRQ, user task in syscall picked to run
179 ; -RTIE to userspace
180 ; Returns from L2 context fine
181 ; But both L1 and L2 re-enabled, so another L1 can be taken
182 ; while prev L1 is still unserviced
183 ;
178 ;------------------------------------------------------ 184 ;------------------------------------------------------
179 185
186 ; L2 interrupting L1 implies both L2 and L1 active
187 ; However both A2 and A1 are NOT set in STATUS32, thus
188 ; need to check STATUS32_L2 to determine if L1 was active
189
180 ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs) 190 ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
181 bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal 191 bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
182 192
183 ; A1 is set in status32_l2
184 ; bump thread_info->preempt_count (Disable preemption) 193 ; bump thread_info->preempt_count (Disable preemption)
185 GET_CURR_THR_INFO_FROM_SP r10 194 GET_CURR_THR_INFO_FROM_SP r10
186 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] 195 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -320,11 +329,10 @@ END(call_do_page_fault)
320 ; Note that we use realtime STATUS32 (not pt_regs->status32) to 329 ; Note that we use realtime STATUS32 (not pt_regs->status32) to
321 ; decide that. 330 ; decide that.
322 331
323 ; if Returning from Exception 332 and.f 0, r10, (STATUS_A1_MASK|STATUS_A2_MASK)
324 btst r10, STATUS_AE_BIT 333 bz .Lexcep_or_pure_K_ret
325 bnz .Lexcep_ret
326 334
327 ; Not Exception so maybe Interrupts (Level 1 or 2) 335 ; Returning from Interrupts (Level 1 or 2)
328 336
329#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS 337#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
330 338
@@ -365,8 +373,7 @@ END(call_do_page_fault)
365 st r9, [r10, THREAD_INFO_PREEMPT_COUNT] 373 st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
366 374
367149: 375149:
368 ;return from level 2 376 INTERRUPT_EPILOGUE 2 ; return from level 2 interrupt
369 INTERRUPT_EPILOGUE 2
370debug_marker_l2: 377debug_marker_l2:
371 rtie 378 rtie
372 379
@@ -374,15 +381,11 @@ not_level2_interrupt:
374 381
375#endif 382#endif
376 383
377 bbit0 r10, STATUS_A1_BIT, .Lpure_k_mode_ret 384 INTERRUPT_EPILOGUE 1 ; return from level 1 interrupt
378
379 ;return from level 1
380 INTERRUPT_EPILOGUE 1
381debug_marker_l1: 385debug_marker_l1:
382 rtie 386 rtie
383 387
384.Lexcep_ret: 388.Lexcep_or_pure_K_ret:
385.Lpure_k_mode_ret:
386 389
387 ;this case is for syscalls or Exceptions or pure kernel mode 390 ;this case is for syscalls or Exceptions or pure kernel mode
388 391
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 812f95e6ae69..689dd867fdff 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -50,28 +50,37 @@
50.endm 50.endm
51 51
52 .section .init.text, "ax",@progbits 52 .section .init.text, "ax",@progbits
53 .type stext, @function 53
54 .globl stext 54;----------------------------------------------------------------
55stext: 55; Default Reset Handler (jumped into from Reset vector)
56 ;------------------------------------------------------------------- 56; - Don't clobber r0,r1,r2 as they might have u-boot provided args
57 ; Don't clobber r0-r2 yet. It might have bootloader provided info 57; - Platforms can override this weak version if needed
58 ;------------------------------------------------------------------- 58;----------------------------------------------------------------
59WEAK(res_service)
60 j stext
61END(res_service)
62
63;----------------------------------------------------------------
64; Kernel Entry point
65;----------------------------------------------------------------
66ENTRY(stext)
59 67
60 CPU_EARLY_SETUP 68 CPU_EARLY_SETUP
61 69
62#ifdef CONFIG_SMP 70#ifdef CONFIG_SMP
63 ; Ensure Boot (Master) proceeds. Others wait in platform dependent way
64 ; IDENTITY Reg [ 3 2 1 0 ]
65 ; (cpu-id) ^^^ => Zero for UP ARC700
66 ; => #Core-ID if SMP (Master 0)
67 ; Note that non-boot CPUs might not land here if halt-on-reset and
68 ; instead breath life from @first_lines_of_secondary, but we still
69 ; need to make sure only boot cpu takes this path.
70 GET_CPU_ID r5 71 GET_CPU_ID r5
71 cmp r5, 0 72 cmp r5, 0
72 mov.ne r0, r5 73 mov.nz r0, r5
73 jne arc_platform_smp_wait_to_boot 74#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
75 ; Non-Master can proceed, as the system will already be booted sufficiently
76 jnz first_lines_of_secondary
77#else
78 ; Non-Masters wait for Master to boot enough and bring them up
79 jnz arc_platform_smp_wait_to_boot
74#endif 80#endif
81 ; Master falls thru
82#endif
83
75 ; Clear BSS before updating any globals 84 ; Clear BSS before updating any globals
76 ; XXX: use ZOL here 85 ; XXX: use ZOL here
77 mov r5, __bss_start 86 mov r5, __bss_start
@@ -102,18 +111,14 @@ stext:
102 GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) 111 GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
103 112
104 j start_kernel ; "C" entry point 113 j start_kernel ; "C" entry point
114END(stext)
105 115
106#ifdef CONFIG_SMP 116#ifdef CONFIG_SMP
107;---------------------------------------------------------------- 117;----------------------------------------------------------------
108; First lines of code run by secondary before jumping to 'C' 118; First lines of code run by secondary before jumping to 'C'
109;---------------------------------------------------------------- 119;----------------------------------------------------------------
110 .section .text, "ax",@progbits 120 .section .text, "ax",@progbits
111 .type first_lines_of_secondary, @function 121ENTRY(first_lines_of_secondary)
112 .globl first_lines_of_secondary
113
114first_lines_of_secondary:
115
116 CPU_EARLY_SETUP
117 122
118 ; setup per-cpu idle task as "current" on this CPU 123 ; setup per-cpu idle task as "current" on this CPU
119 ld r0, [@secondary_idle_tsk] 124 ld r0, [@secondary_idle_tsk]
@@ -126,5 +131,5 @@ first_lines_of_secondary:
126 GET_TSK_STACK_BASE r0, sp 131 GET_TSK_STACK_BASE r0, sp
127 132
128 j start_kernel_secondary 133 j start_kernel_secondary
129 134END(first_lines_of_secondary)
130#endif 135#endif
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 039fac30b5c1..06bcedf19b62 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -79,17 +79,16 @@ static struct irq_chip onchip_intc = {
79static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, 79static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
80 irq_hw_number_t hw) 80 irq_hw_number_t hw)
81{ 81{
82 /* 82 switch (irq) {
83 * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core 83 case TIMER0_IRQ:
84 * code doesn't own it (like TIMER0). ISS IDU / ezchip define it 84#ifdef CONFIG_SMP
85 * in platform header which can't be included here as it goes 85 case IPI_IRQ:
86 * against multi-platform image philisophy 86#endif
87 */
88 if (irq == TIMER0_IRQ)
89 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); 87 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
90 else 88 break;
89 default:
91 irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); 90 irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
92 91 }
93 return 0; 92 return 0;
94} 93}
95 94
@@ -148,78 +147,15 @@ IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
148 147
149void arch_local_irq_enable(void) 148void arch_local_irq_enable(void)
150{ 149{
151
152 unsigned long flags = arch_local_save_flags(); 150 unsigned long flags = arch_local_save_flags();
153 151
154 /* Allow both L1 and L2 at the onset */ 152 if (flags & STATUS_A2_MASK)
155 flags |= (STATUS_E1_MASK | STATUS_E2_MASK); 153 flags |= STATUS_E2_MASK;
156 154 else if (flags & STATUS_A1_MASK)
157 /* Called from hard ISR (between irq_enter and irq_exit) */ 155 flags |= STATUS_E1_MASK;
158 if (in_irq()) {
159
160 /* If in L2 ISR, don't re-enable any further IRQs as this can
161 * cause IRQ priorities to get upside down. e.g. it could allow
162 * L1 be taken while in L2 hard ISR which is wrong not only in
163 * theory, it can also cause the dreaded L1-L2-L1 scenario
164 */
165 if (flags & STATUS_A2_MASK)
166 flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
167
168 /* Even if in L1 ISR, allowe Higher prio L2 IRQs */
169 else if (flags & STATUS_A1_MASK)
170 flags &= ~(STATUS_E1_MASK);
171 }
172
173 /* called from soft IRQ, ideally we want to re-enable all levels */
174
175 else if (in_softirq()) {
176
177 /* However if this is case of L1 interrupted by L2,
178 * re-enabling both may cause whaco L1-L2-L1 scenario
179 * because ARC700 allows level 1 to interrupt an active L2 ISR
180 * Thus we disable both
181 * However some code, executing in soft ISR wants some IRQs
182 * to be enabled so we re-enable L2 only
183 *
184 * How do we determine L1 intr by L2
185 * -A2 is set (means in L2 ISR)
186 * -E1 is set in this ISR's pt_regs->status32 which is
187 * saved copy of status32_l2 when l2 ISR happened
188 */
189 struct pt_regs *pt = get_irq_regs();
190
191 if ((flags & STATUS_A2_MASK) && pt &&
192 (pt->status32 & STATUS_A1_MASK)) {
193 /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
194 flags &= ~(STATUS_E1_MASK);
195 }
196 }
197 156
198 arch_local_irq_restore(flags); 157 arch_local_irq_restore(flags);
199} 158}
200 159
201#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
202
203/*
204 * Simpler version for only 1 level of interrupt
205 * Here we only Worry about Level 1 Bits
206 */
207void arch_local_irq_enable(void)
208{
209 unsigned long flags;
210
211 /*
212 * ARC IDE Drivers tries to re-enable interrupts from hard-isr
213 * context which is simply wrong
214 */
215 if (in_irq()) {
216 WARN_ONCE(1, "IRQ enabled from hard-isr");
217 return;
218 }
219
220 flags = arch_local_save_flags();
221 flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
222 arch_local_irq_restore(flags);
223}
224#endif
225EXPORT_SYMBOL(arch_local_irq_enable); 160EXPORT_SYMBOL(arch_local_irq_enable);
161#endif
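
For the two-priority-level configuration, the rewritten arch_local_irq_enable() drops the in_irq()/in_softirq() heuristics and only tops up the enable bit matching whichever interrupt level is currently active. A pure-function sketch of that decision (hypothetical STATUS32 snapshots; the kernel feeds the result to arch_local_irq_restore()):

#include <stdio.h>

#define STATUS_E1_MASK (1UL << 1)
#define STATUS_E2_MASK (1UL << 2)
#define STATUS_A1_MASK (1UL << 3)
#define STATUS_A2_MASK (1UL << 4)

/* mirrors the new enable logic: honour the currently active level only */
static unsigned long irq_enable_flags(unsigned long flags)
{
    if (flags & STATUS_A2_MASK)
        flags |= STATUS_E2_MASK;
    else if (flags & STATUS_A1_MASK)
        flags |= STATUS_E1_MASK;
    return flags;
}

int main(void)
{
    printf("in L2 ISR: %#lx\n", irq_enable_flags(STATUS_A2_MASK));
    printf("in L1 ISR: %#lx\n", irq_enable_flags(STATUS_A1_MASK));
    return 0;
}
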
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2989a7bcf8a8..2ee226546c6a 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -10,6 +10,7 @@
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/irqchip.h> 11#include <linux/irqchip.h>
12#include <asm/mach_desc.h> 12#include <asm/mach_desc.h>
13#include <asm/smp.h>
13 14
14/* 15/*
15 * Late Interrupt system init called from start_kernel for Boot CPU only 16 * Late Interrupt system init called from start_kernel for Boot CPU only
@@ -19,17 +20,20 @@
19 */ 20 */
20void __init init_IRQ(void) 21void __init init_IRQ(void)
21{ 22{
22 /* Any external intc can be setup here */ 23 /*
23 if (machine_desc->init_irq) 24 * Any external intc will be set up provided DT chains them
24 machine_desc->init_irq(); 25 * Any external intc will be setup provided DT chains them
25 26 * properly
26 /* process the entire interrupt tree in one go */ 27 */
27 irqchip_init(); 28 irqchip_init();
28 29
29#ifdef CONFIG_SMP 30#ifdef CONFIG_SMP
30 /* Master CPU can initialize its side of IPI */ 31 /* an SMP H/w block could do IPI IRQ request here */
31 if (machine_desc->init_smp) 32 if (plat_smp_ops.init_irq_cpu)
32 machine_desc->init_smp(smp_processor_id()); 33 plat_smp_ops.init_irq_cpu(smp_processor_id());
34
35 if (machine_desc->init_cpu_smp)
36 machine_desc->init_cpu_smp(smp_processor_id());
33#endif 37#endif
34} 38}
35 39
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 4ffd1855f1bd..74a9b074ac3e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -12,20 +12,14 @@
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <asm/mcip.h> 14#include <asm/mcip.h>
15#include <asm/setup.h>
15 16
16static char smp_cpuinfo_buf[128]; 17static char smp_cpuinfo_buf[128];
17static int idu_detected; 18static int idu_detected;
18 19
19static DEFINE_RAW_SPINLOCK(mcip_lock); 20static DEFINE_RAW_SPINLOCK(mcip_lock);
20 21
21/* 22static void mcip_setup_per_cpu(int cpu)
22 * Any SMP specific init any CPU does when it comes up.
23 * Here we setup the CPU to enable Inter-Processor-Interrupts
24 * Called for each CPU
25 * -Master : init_IRQ()
26 * -Other(s) : start_kernel_secondary()
27 */
28void mcip_init_smp(unsigned int cpu)
29{ 23{
30 smp_ipi_irq_setup(cpu, IPI_IRQ); 24 smp_ipi_irq_setup(cpu, IPI_IRQ);
31} 25}
@@ -96,34 +90,8 @@ static void mcip_ipi_clear(int irq)
96#endif 90#endif
97} 91}
98 92
99volatile int wake_flag; 93static void mcip_probe_n_setup(void)
100
101static void mcip_wakeup_cpu(int cpu, unsigned long pc)
102{
103 BUG_ON(cpu == 0);
104 wake_flag = cpu;
105}
106
107void arc_platform_smp_wait_to_boot(int cpu)
108{ 94{
109 while (wake_flag != cpu)
110 ;
111
112 wake_flag = 0;
113 __asm__ __volatile__("j @first_lines_of_secondary \n");
114}
115
116struct plat_smp_ops plat_smp_ops = {
117 .info = smp_cpuinfo_buf,
118 .cpu_kick = mcip_wakeup_cpu,
119 .ipi_send = mcip_ipi_send,
120 .ipi_clear = mcip_ipi_clear,
121};
122
123void mcip_init_early_smp(void)
124{
125#define IS_AVAIL1(var, str) ((var) ? str : "")
126
127 struct mcip_bcr { 95 struct mcip_bcr {
128#ifdef CONFIG_CPU_BIG_ENDIAN 96#ifdef CONFIG_CPU_BIG_ENDIAN
129 unsigned int pad3:8, 97 unsigned int pad3:8,
@@ -161,6 +129,14 @@ void mcip_init_early_smp(void)
161 panic("kernel trying to use non-existent GRTC\n"); 129 panic("kernel trying to use non-existent GRTC\n");
162} 130}
163 131
132struct plat_smp_ops plat_smp_ops = {
133 .info = smp_cpuinfo_buf,
134 .init_early_smp = mcip_probe_n_setup,
135 .init_irq_cpu = mcip_setup_per_cpu,
136 .ipi_send = mcip_ipi_send,
137 .ipi_clear = mcip_ipi_clear,
138};
139
164/*************************************************************************** 140/***************************************************************************
165 * ARCv2 Interrupt Distribution Unit (IDU) 141 * ARCv2 Interrupt Distribution Unit (IDU)
166 * 142 *
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index cabde9dc0696..c33e77c0ad3e 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -160,10 +160,6 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
160 { {0x00, NULL } } 160 { {0x00, NULL } }
161}; 161};
162 162
163#define IS_AVAIL1(v, s) ((v) ? s : "")
164#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
165#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
166#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
167 163
168static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) 164static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
169{ 165{
@@ -415,8 +411,9 @@ void __init setup_arch(char **cmdline_p)
415 if (machine_desc->init_early) 411 if (machine_desc->init_early)
416 machine_desc->init_early(); 412 machine_desc->init_early();
417 413
418 setup_processor();
419 smp_init_cpus(); 414 smp_init_cpus();
415
416 setup_processor();
420 setup_arch_memory(); 417 setup_arch_memory();
421 418
422 /* copy flat DT out of .init and then unflatten it */ 419 /* copy flat DT out of .init and then unflatten it */
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index be13d12420ba..580587805fa3 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -42,8 +42,13 @@ void __init smp_prepare_boot_cpu(void)
42} 42}
43 43
44/* 44/*
45 * Initialise the CPU possible map early - this describes the CPUs 45 * Called from setup_arch() before calling setup_processor()
46 * which may be present or become present in the system. 46 *
47 * - Initialise the CPU possible map early - this describes the CPUs
48 * which may be present or become present in the system.
49 * - Call early smp init hook. This can initialize a specific multi-core
50 * IP which is, say, common to several platforms (hence not part of
51 * platform specific init_early() hook)
47 */ 52 */
48void __init smp_init_cpus(void) 53void __init smp_init_cpus(void)
49{ 54{
@@ -51,6 +56,9 @@ void __init smp_init_cpus(void)
51 56
52 for (i = 0; i < NR_CPUS; i++) 57 for (i = 0; i < NR_CPUS; i++)
53 set_cpu_possible(i, true); 58 set_cpu_possible(i, true);
59
60 if (plat_smp_ops.init_early_smp)
61 plat_smp_ops.init_early_smp();
54} 62}
55 63
56/* called from init ( ) => process 1 */ 64/* called from init ( ) => process 1 */
@@ -72,35 +80,29 @@ void __init smp_cpus_done(unsigned int max_cpus)
72} 80}
73 81
74/* 82/*
75 * After power-up, a non Master CPU needs to wait for Master to kick start it 83 * Default smp boot helper for Run-on-reset case where all cores start off
76 * 84 * together. Non-masters need to wait for Master to start running.
77 * The default implementation halts 85 * This is implemented using a flag in memory, which Non-masters spin-wait on.
78 * 86 * Master sets it to cpu-id of core to "ungate" it.
79 * This relies on platform specific support allowing Master to directly set
80 * this CPU's PC (to be @first_lines_of_secondary() and kick start it.
81 *
82 * In lack of such h/w assist, platforms can override this function
83 * - make this function busy-spin on a token, eventually set by Master
84 * (from arc_platform_smp_wakeup_cpu())
85 * - Once token is available, jump to @first_lines_of_secondary
86 * (using inline asm).
87 *
88 * Alert: can NOT use stack here as it has not been determined/setup for CPU.
89 * If it turns out to be elaborate, it's better to code it in assembly
90 *
91 */ 87 */
92void __weak arc_platform_smp_wait_to_boot(int cpu) 88static volatile int wake_flag;
89
90static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
93{ 91{
94 /* 92 BUG_ON(cpu == 0);
95 * As a hack for debugging - since debugger will single-step over the 93 wake_flag = cpu;
96 * FLAG insn - wrap the halt itself it in a self loop 94}
97 */ 95
98 __asm__ __volatile__( 96void arc_platform_smp_wait_to_boot(int cpu)
99 "1: \n" 97{
100 " flag 1 \n" 98 while (wake_flag != cpu)
101 " b 1b \n"); 99 ;
100
101 wake_flag = 0;
102 __asm__ __volatile__("j @first_lines_of_secondary \n");
102} 103}
103 104
105
104const char *arc_platform_smp_cpuinfo(void) 106const char *arc_platform_smp_cpuinfo(void)
105{ 107{
106 return plat_smp_ops.info ? : ""; 108 return plat_smp_ops.info ? : "";
@@ -129,8 +131,12 @@ void start_kernel_secondary(void)
129 131
130 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); 132 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
131 133
132 if (machine_desc->init_smp) 134 /* Some SMP H/w setup - for each cpu */
133 machine_desc->init_smp(cpu); 135 if (plat_smp_ops.init_irq_cpu)
136 plat_smp_ops.init_irq_cpu(cpu);
137
138 if (machine_desc->init_cpu_smp)
139 machine_desc->init_cpu_smp(cpu);
134 140
135 arc_local_timer_setup(); 141 arc_local_timer_setup();
136 142
@@ -161,6 +167,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
161 if (plat_smp_ops.cpu_kick) 167 if (plat_smp_ops.cpu_kick)
162 plat_smp_ops.cpu_kick(cpu, 168 plat_smp_ops.cpu_kick(cpu,
163 (unsigned long)first_lines_of_secondary); 169 (unsigned long)first_lines_of_secondary);
170 else
171 arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
164 172
165 /* wait for 1 sec after kicking the secondary */ 173 /* wait for 1 sec after kicking the secondary */
166 wait_till = jiffies + HZ; 174 wait_till = jiffies + HZ;
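
The default run-on-reset handshake above is just a shared flag: a secondary spins in arc_platform_smp_wait_to_boot() until the master writes its cpu-id into wake_flag. A rough user-space model of that handshake with pthreads standing in for CPUs (build with -pthread; the kernel spins on a bare volatile because the secondary has no stack or per-cpu state set up yet at that point):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile int wake_flag;

static void *secondary(void *arg)
{
    int cpu = (int)(long)arg;

    while (wake_flag != cpu)    /* arc_platform_smp_wait_to_boot() */
        ;
    wake_flag = 0;              /* ack, then jump to first_lines_of_secondary */
    printf("cpu%d: released\n", cpu);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, secondary, (void *)1L);
    usleep(1000);
    wake_flag = 1;              /* master: arc_default_smp_cpu_kick(1, ...) */
    pthread_join(t, NULL);
    return 0;
}
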
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 4294761a2b3e..dfad287f1db1 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -285,7 +285,4 @@ void __init time_init(void)
285 285
286 /* sets up the periodic event timer */ 286 /* sets up the periodic event timer */
287 arc_local_timer_setup(); 287 arc_local_timer_setup();
288
289 if (machine_desc->init_time)
290 machine_desc->init_time();
291} 288}
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index dd35bde39f69..894e696bddaa 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@
12#include <asm/thread_info.h> 12#include <asm/thread_info.h>
13 13
14OUTPUT_ARCH(arc) 14OUTPUT_ARCH(arc)
15ENTRY(_stext) 15ENTRY(res_service)
16 16
17#ifdef CONFIG_CPU_BIG_ENDIAN 17#ifdef CONFIG_CPU_BIG_ENDIAN
18jiffies = jiffies_64 + 4; 18jiffies = jiffies_64 + 4;
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 7beb941556c3..3703a4969349 100644
--- a/arch/arc/mm/Makefile
+++ b/arch/arc/mm/Makefile
@@ -8,3 +8,4 @@
8 8
9obj-y := extable.o ioremap.o dma.o fault.o init.o 9obj-y := extable.o ioremap.o dma.o fault.o init.o
10obj-y += tlb.o tlbex.o cache.o mmap.o 10obj-y += tlb.o tlbex.o cache.o mmap.o
11obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 0d1a6e96839f..ff7ff6cbb811 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -25,7 +25,7 @@ static int l2_line_sz;
25int ioc_exists; 25int ioc_exists;
26volatile int slc_enable = 1, ioc_enable = 1; 26volatile int slc_enable = 1, ioc_enable = 1;
27 27
28void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr, 28void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
29 unsigned long sz, const int cacheop); 29 unsigned long sz, const int cacheop);
30 30
31void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz); 31void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
@@ -37,7 +37,6 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
37 int n = 0; 37 int n = 0;
38 struct cpuinfo_arc_cache *p; 38 struct cpuinfo_arc_cache *p;
39 39
40#define IS_USED_RUN(v) ((v) ? "" : "(disabled) ")
41#define PR_CACHE(p, cfg, str) \ 40#define PR_CACHE(p, cfg, str) \
42 if (!(p)->ver) \ 41 if (!(p)->ver) \
43 n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \ 42 n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
@@ -47,7 +46,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
47 (p)->sz_k, (p)->assoc, (p)->line_len, \ 46 (p)->sz_k, (p)->assoc, (p)->line_len, \
48 (p)->vipt ? "VIPT" : "PIPT", \ 47 (p)->vipt ? "VIPT" : "PIPT", \
49 (p)->alias ? " aliasing" : "", \ 48 (p)->alias ? " aliasing" : "", \
50 IS_ENABLED(cfg) ? "" : " (not used)"); 49 IS_USED_CFG(cfg));
51 50
52 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); 51 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
53 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); 52 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
@@ -63,7 +62,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
63 62
64 if (ioc_exists) 63 if (ioc_exists)
65 n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n", 64 n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
66 IS_USED_RUN(ioc_enable)); 65 IS_DISABLED_RUN(ioc_enable));
67 66
68 return buf; 67 return buf;
69} 68}
@@ -217,7 +216,7 @@ slc_chk:
217 */ 216 */
218 217
219static inline 218static inline
220void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr, 219void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
221 unsigned long sz, const int op) 220 unsigned long sz, const int op)
222{ 221{
223 unsigned int aux_cmd; 222 unsigned int aux_cmd;
@@ -254,8 +253,12 @@ void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
254 } 253 }
255} 254}
256 255
256/*
257 * For ARC700 MMUv3 I-cache and D-cache flushes
258 * Also reused for HS38 aliasing I-cache configuration
259 */
257static inline 260static inline
258void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr, 261void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
259 unsigned long sz, const int op) 262 unsigned long sz, const int op)
260{ 263{
261 unsigned int aux_cmd, aux_tag; 264 unsigned int aux_cmd, aux_tag;
@@ -290,6 +293,16 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
290 if (full_page) 293 if (full_page)
291 write_aux_reg(aux_tag, paddr); 294 write_aux_reg(aux_tag, paddr);
292 295
296 /*
297 * This is technically for MMU v4, using the MMU v3 programming model
 298 * Special work for HS38 aliasing I-cache configuration with PAE40
299 * - upper 8 bits of paddr need to be written into PTAG_HI
300 * - (and needs to be written before the lower 32 bits)
301 * Note that PTAG_HI is hoisted outside the line loop
302 */
303 if (is_pae40_enabled() && op == OP_INV_IC)
304 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
305
293 while (num_lines-- > 0) { 306 while (num_lines-- > 0) {
294 if (!full_page) { 307 if (!full_page) {
295 write_aux_reg(aux_tag, paddr); 308 write_aux_reg(aux_tag, paddr);
@@ -302,14 +315,20 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
302} 315}
303 316
304/* 317/*
305 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache 318 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
306 * maintenance ops (in IVIL reg), as long as icache doesn't alias. 319 * Here's how cache ops are implemented
320 *
321 * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
322 * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
323 * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
324 * respectively, similar to MMU v3 programming model, hence
325 * __cache_line_loop_v3() is used)
307 * 326 *
308 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is 327 * If PAE40 is enabled, independent of aliasing considerations, the higher bits
 309 * specified in PTAG (similar to MMU v3) 328 * need to be written into PTAG_HI
310 */ 329 */
311static inline 330static inline
312void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr, 331void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
313 unsigned long sz, const int cacheop) 332 unsigned long sz, const int cacheop)
314{ 333{
315 unsigned int aux_cmd; 334 unsigned int aux_cmd;
@@ -336,6 +355,22 @@ void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
336 355
337 num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); 356 num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
338 357
358 /*
359 * For HS38 PAE40 configuration
360 * - upper 8 bits of paddr need to be written into PTAG_HI
361 * - (and needs to be written before the lower 32 bits)
362 */
363 if (is_pae40_enabled()) {
364 if (cacheop == OP_INV_IC)
365 /*
366 * Non aliasing I-cache in HS38,
367 * aliasing I-cache handled in __cache_line_loop_v3()
368 */
369 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
370 else
371 write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
372 }
373
339 while (num_lines-- > 0) { 374 while (num_lines-- > 0) {
340 write_aux_reg(aux_cmd, paddr); 375 write_aux_reg(aux_cmd, paddr);
341 paddr += L1_CACHE_BYTES; 376 paddr += L1_CACHE_BYTES;
@@ -413,7 +448,7 @@ static inline void __dc_entire_op(const int op)
413/* 448/*
414 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback) 449 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
415 */ 450 */
416static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, 451static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
417 unsigned long sz, const int op) 452 unsigned long sz, const int op)
418{ 453{
419 unsigned long flags; 454 unsigned long flags;
@@ -446,7 +481,7 @@ static inline void __ic_entire_inv(void)
446} 481}
447 482
448static inline void 483static inline void
449__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr, 484__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
450 unsigned long sz) 485 unsigned long sz)
451{ 486{
452 unsigned long flags; 487 unsigned long flags;
@@ -463,7 +498,7 @@ __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
463#else 498#else
464 499
465struct ic_inv_args { 500struct ic_inv_args {
466 unsigned long paddr, vaddr; 501 phys_addr_t paddr, vaddr;
467 int sz; 502 int sz;
468}; 503};
469 504
@@ -474,7 +509,7 @@ static void __ic_line_inv_vaddr_helper(void *info)
474 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); 509 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
475} 510}
476 511
477static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, 512static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
478 unsigned long sz) 513 unsigned long sz)
479{ 514{
480 struct ic_inv_args ic_inv = { 515 struct ic_inv_args ic_inv = {
@@ -495,7 +530,7 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
495 530
496#endif /* CONFIG_ARC_HAS_ICACHE */ 531#endif /* CONFIG_ARC_HAS_ICACHE */
497 532
498noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) 533noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
499{ 534{
500#ifdef CONFIG_ISA_ARCV2 535#ifdef CONFIG_ISA_ARCV2
501 /* 536 /*
@@ -585,7 +620,7 @@ void flush_dcache_page(struct page *page)
585 } else if (page_mapped(page)) { 620 } else if (page_mapped(page)) {
586 621
587 /* kernel reading from page with U-mapping */ 622 /* kernel reading from page with U-mapping */
588 unsigned long paddr = (unsigned long)page_address(page); 623 phys_addr_t paddr = (unsigned long)page_address(page);
589 unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; 624 unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
590 625
591 if (addr_not_cache_congruent(paddr, vaddr)) 626 if (addr_not_cache_congruent(paddr, vaddr))
@@ -733,14 +768,14 @@ EXPORT_SYMBOL(flush_icache_range);
733 * builtin kernel page will not have any virtual mappings. 768 * builtin kernel page will not have any virtual mappings.
734 * kprobe on loadable module will be kernel vaddr. 769 * kprobe on loadable module will be kernel vaddr.
735 */ 770 */
736void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) 771void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
737{ 772{
738 __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); 773 __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
739 __ic_line_inv_vaddr(paddr, vaddr, len); 774 __ic_line_inv_vaddr(paddr, vaddr, len);
740} 775}
741 776
742/* wrapper to compile time eliminate alignment checks in flush loop */ 777/* wrapper to compile time eliminate alignment checks in flush loop */
743void __inv_icache_page(unsigned long paddr, unsigned long vaddr) 778void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
744{ 779{
745 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); 780 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
746} 781}
@@ -749,7 +784,7 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
749 * wrapper to clearout kernel or userspace mappings of a page 784 * wrapper to clearout kernel or userspace mappings of a page
750 * For kernel mappings @vaddr == @paddr 785 * For kernel mappings @vaddr == @paddr
751 */ 786 */
752void __flush_dcache_page(unsigned long paddr, unsigned long vaddr) 787void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
753{ 788{
754 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); 789 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
755} 790}
@@ -807,8 +842,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
807void copy_user_highpage(struct page *to, struct page *from, 842void copy_user_highpage(struct page *to, struct page *from,
808 unsigned long u_vaddr, struct vm_area_struct *vma) 843 unsigned long u_vaddr, struct vm_area_struct *vma)
809{ 844{
810 unsigned long kfrom = (unsigned long)page_address(from); 845 void *kfrom = kmap_atomic(from);
811 unsigned long kto = (unsigned long)page_address(to); 846 void *kto = kmap_atomic(to);
812 int clean_src_k_mappings = 0; 847 int clean_src_k_mappings = 0;
813 848
814 /* 849 /*
@@ -818,13 +853,16 @@ void copy_user_highpage(struct page *to, struct page *from,
818 * 853 *
819 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is 854 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
820 * equally valid for SRC page as well 855 * equally valid for SRC page as well
856 *
857 * For !VIPT cache, all of this gets compiled out as
858 * addr_not_cache_congruent() is 0
821 */ 859 */
822 if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { 860 if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
823 __flush_dcache_page(kfrom, u_vaddr); 861 __flush_dcache_page((unsigned long)kfrom, u_vaddr);
824 clean_src_k_mappings = 1; 862 clean_src_k_mappings = 1;
825 } 863 }
826 864
827 copy_page((void *)kto, (void *)kfrom); 865 copy_page(kto, kfrom);
828 866
829 /* 867 /*
830 * Mark DST page K-mapping as dirty for a later finalization by 868 * Mark DST page K-mapping as dirty for a later finalization by
@@ -841,11 +879,14 @@ void copy_user_highpage(struct page *to, struct page *from,
841 * sync the kernel mapping back to physical page 879 * sync the kernel mapping back to physical page
842 */ 880 */
843 if (clean_src_k_mappings) { 881 if (clean_src_k_mappings) {
844 __flush_dcache_page(kfrom, kfrom); 882 __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
845 set_bit(PG_dc_clean, &from->flags); 883 set_bit(PG_dc_clean, &from->flags);
846 } else { 884 } else {
847 clear_bit(PG_dc_clean, &from->flags); 885 clear_bit(PG_dc_clean, &from->flags);
848 } 886 }
887
888 kunmap_atomic(kto);
889 kunmap_atomic(kfrom);
849} 890}
850 891
851void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) 892void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
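The PAE40 changes above reduce to one ordering rule: for a 40-bit physical address, bits 39..32 are written to the PTAG_HI register before the lower 32 bits are issued to the per-line op register. A minimal user-space sketch of just that split; write_reg() and the register labels are stand-ins for illustration, not the kernel's write_aux_reg() accessors:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    /* stand-in for the real register accessor; just logs the programming order */
    static void write_reg(const char *reg, uint32_t val)
    {
        printf("%-10s <= 0x%08x\n", reg, (unsigned int)val);
    }

    static void cache_line_op_pae40(phys_addr_t paddr)
    {
        /* upper 8 bits (39..32) go into PTAG_HI first ... */
        write_reg("PTAG_HI", (uint32_t)(paddr >> 32));
        /* ... then the lower 32 bits into the per-line op register */
        write_reg("IC_IVIL", (uint32_t)paddr);
    }

    int main(void)
    {
        cache_line_op_pae40(0x1234567890ULL);   /* a 40-bit physical address */
        return 0;
    }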
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d948e4e9d89c..af63f4a13e60 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -18,7 +18,14 @@
18#include <asm/pgalloc.h> 18#include <asm/pgalloc.h>
19#include <asm/mmu.h> 19#include <asm/mmu.h>
20 20
21static int handle_vmalloc_fault(unsigned long address) 21/*
22 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
23 * Refer to asm/processor.h for System Memory Map
24 *
25 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
26 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
27 */
28noinline static int handle_kernel_vaddr_fault(unsigned long address)
22{ 29{
23 /* 30 /*
24 * Synchronize this task's top level page-table 31 * Synchronize this task's top level page-table
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
72 * only copy the information from the master page table, 79 * only copy the information from the master page table,
73 * nothing more. 80 * nothing more.
74 */ 81 */
75 if (address >= VMALLOC_START && address <= VMALLOC_END) { 82 if (address >= VMALLOC_START) {
76 ret = handle_vmalloc_fault(address); 83 ret = handle_kernel_vaddr_fault(address);
77 if (unlikely(ret)) 84 if (unlikely(ret))
78 goto bad_area_nosemaphore; 85 goto bad_area_nosemaphore;
79 else 86 else
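With vmalloc, pkmap and fixmap all placed above VMALLOC_START, the fault path above only needs a lower-bound check instead of the old VMALLOC_START..VMALLOC_END window. A tiny sketch of that classification; the VMALLOC_START value here is an assumed layout, for illustration only:

    #include <stdbool.h>
    #include <stdio.h>

    #define VMALLOC_START 0x70000000UL   /* assumed kernel vaddr base, illustrative */

    /* mirrors the widened check: any fault above VMALLOC_START is a kernel vaddr */
    static bool is_kernel_vaddr_fault(unsigned long address)
    {
        return address >= VMALLOC_START;
    }

    int main(void)
    {
        printf("0x6fff0000 -> %d\n", is_kernel_vaddr_fault(0x6fff0000UL)); /* user: 0 */
        printf("0x7fe00000 -> %d\n", is_kernel_vaddr_fault(0x7fe00000UL)); /* pkmap/fixmap: 1 */
        return 0;
    }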
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
new file mode 100644
index 000000000000..065ee6bfa82a
--- /dev/null
+++ b/arch/arc/mm/highmem.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/bootmem.h>
11#include <linux/export.h>
12#include <linux/highmem.h>
13#include <asm/processor.h>
14#include <asm/pgtable.h>
15#include <asm/pgalloc.h>
16#include <asm/tlbflush.h>
17
18/*
19 * HIGHMEM API:
20 *
 21 * kmap() API provides sleep semantics, hence referred to as "permanent maps"
22 * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
23 * for book-keeping
24 *
25 * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
 26 * short-lived "temporary mappings" which historically were implemented as
27 * fixmaps (compile time addr etc). Their book-keeping is done per cpu.
28 *
29 * Both these facts combined (preemption disabled and per-cpu allocation)
30 * means the total number of concurrent fixmaps will be limited to max
31 * such allocations in a single control path. Thus KM_TYPE_NR (another
32 * historic relic) is a small'ish number which caps max percpu fixmaps
33 *
34 * ARC HIGHMEM Details
35 *
36 * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
37 * is now shared between vmalloc and kmap (non overlapping though)
38 *
39 * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
40 * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
41 * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
42 *
43 * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
44 * slots across NR_CPUS would be more than sufficient (generic code defines
45 * KM_TYPE_NR as 20).
46 *
47 * - pkmap being preemptible, in theory could do with more than 256 concurrent
48 * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
49 * the PGD and only works with a single page table @pkmap_page_table, hence
50 * sets the limit
51 */
52
53extern pte_t * pkmap_page_table;
54static pte_t * fixmap_page_table;
55
56void *kmap(struct page *page)
57{
58 BUG_ON(in_interrupt());
59 if (!PageHighMem(page))
60 return page_address(page);
61
62 return kmap_high(page);
63}
64
65void *kmap_atomic(struct page *page)
66{
67 int idx, cpu_idx;
68 unsigned long vaddr;
69
70 preempt_disable();
71 pagefault_disable();
72 if (!PageHighMem(page))
73 return page_address(page);
74
75 cpu_idx = kmap_atomic_idx_push();
76 idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
77 vaddr = FIXMAP_ADDR(idx);
78
79 set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
80 mk_pte(page, kmap_prot));
81
82 return (void *)vaddr;
83}
84EXPORT_SYMBOL(kmap_atomic);
85
86void __kunmap_atomic(void *kv)
87{
88 unsigned long kvaddr = (unsigned long)kv;
89
90 if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
91
92 /*
93 * Because preemption is disabled, this vaddr can be associated
94 * with the current allocated index.
95 * But in case of multiple live kmap_atomic(), it still relies on
96 * callers to unmap in right order.
97 */
98 int cpu_idx = kmap_atomic_idx();
99 int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
100
101 WARN_ON(kvaddr != FIXMAP_ADDR(idx));
102
103 pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
104 local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
105
106 kmap_atomic_idx_pop();
107 }
108
109 pagefault_enable();
110 preempt_enable();
111}
112EXPORT_SYMBOL(__kunmap_atomic);
113
114noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
115{
116 pgd_t *pgd_k;
117 pud_t *pud_k;
118 pmd_t *pmd_k;
119 pte_t *pte_k;
120
121 pgd_k = pgd_offset_k(kvaddr);
122 pud_k = pud_offset(pgd_k, kvaddr);
123 pmd_k = pmd_offset(pud_k, kvaddr);
124
125 pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
126 pmd_populate_kernel(&init_mm, pmd_k, pte_k);
127 return pte_k;
128}
129
130void kmap_init(void)
131{
132 /* Due to recursive include hell, we can't do this in processor.h */
133 BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
134
135 BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
136 pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
137
138 BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
139 fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
140}
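The slot arithmetic in kmap_atomic()/__kunmap_atomic() above can be modelled in plain user space: each CPU owns KM_TYPE_NR fixmap slots, a push/pop index selects the slot, and the slot number times the page size gives the fixmap address. The constants and the address formula below are assumptions for illustration, not the kernel's definitions; only the per-cpu indexing and the LIFO expectation mirror the code:

    #include <assert.h>
    #include <stdio.h>

    #define KM_TYPE_NR  20
    #define PAGE_SHIFT  13              /* 8K pages, typical ARC config (assumed) */
    #define FIXMAP_BASE 0x7f000000UL    /* assumed fixmap window base */

    static int kmap_idx[4];             /* per-"cpu" nesting depth */

    static unsigned long kmap_atomic_slot(int cpu)
    {
        int idx = kmap_idx[cpu]++ + KM_TYPE_NR * cpu;
        return FIXMAP_BASE + ((unsigned long)idx << PAGE_SHIFT);
    }

    static void kunmap_atomic_slot(int cpu, unsigned long vaddr)
    {
        int idx = --kmap_idx[cpu] + KM_TYPE_NR * cpu;
        /* like the WARN_ON() in __kunmap_atomic(): expect LIFO unmap order */
        assert(vaddr == FIXMAP_BASE + ((unsigned long)idx << PAGE_SHIFT));
    }

    int main(void)
    {
        unsigned long v0 = kmap_atomic_slot(0);  /* cpu 0, first nested map */
        unsigned long v1 = kmap_atomic_slot(1);  /* cpu 1 gets its own slot window */
        printf("cpu0 slot: 0x%lx, cpu1 slot: 0x%lx\n", v0, v1);
        kunmap_atomic_slot(1, v1);
        kunmap_atomic_slot(0, v0);
        return 0;
    }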
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index d44eedd8c322..a9305b5a2cd4 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -15,6 +15,7 @@
15#endif 15#endif
16#include <linux/swap.h> 16#include <linux/swap.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/highmem.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
20#include <asm/sections.h> 21#include <asm/sections.h>
@@ -24,16 +25,22 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
24char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE); 25char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
25EXPORT_SYMBOL(empty_zero_page); 26EXPORT_SYMBOL(empty_zero_page);
26 27
27/* Default tot mem from .config */ 28static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
28static unsigned long arc_mem_sz = 0x20000000; /* some default */ 29static unsigned long low_mem_sz;
30
31#ifdef CONFIG_HIGHMEM
32static unsigned long min_high_pfn;
33static u64 high_mem_start;
34static u64 high_mem_sz;
35#endif
29 36
30/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */ 37/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
31static int __init setup_mem_sz(char *str) 38static int __init setup_mem_sz(char *str)
32{ 39{
33 arc_mem_sz = memparse(str, NULL) & PAGE_MASK; 40 low_mem_sz = memparse(str, NULL) & PAGE_MASK;
34 41
35 /* early console might not be setup yet - it will show up later */ 42 /* early console might not be setup yet - it will show up later */
36 pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz)); 43 pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
37 44
38 return 0; 45 return 0;
39} 46}
@@ -41,8 +48,22 @@ early_param("mem", setup_mem_sz);
41 48
42void __init early_init_dt_add_memory_arch(u64 base, u64 size) 49void __init early_init_dt_add_memory_arch(u64 base, u64 size)
43{ 50{
44 arc_mem_sz = size & PAGE_MASK; 51 int in_use = 0;
45 pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz)); 52
53 if (!low_mem_sz) {
54 BUG_ON(base != low_mem_start);
55 low_mem_sz = size;
56 in_use = 1;
57 } else {
58#ifdef CONFIG_HIGHMEM
59 high_mem_start = base;
60 high_mem_sz = size;
61 in_use = 1;
62#endif
63 }
64
65 pr_info("Memory @ %llx [%lldM] %s\n",
66 base, TO_MB(size), !in_use ? "Not used":"");
46} 67}
47 68
48#ifdef CONFIG_BLK_DEV_INITRD 69#ifdef CONFIG_BLK_DEV_INITRD
@@ -72,46 +93,62 @@ early_param("initrd", early_initrd);
72void __init setup_arch_memory(void) 93void __init setup_arch_memory(void)
73{ 94{
74 unsigned long zones_size[MAX_NR_ZONES]; 95 unsigned long zones_size[MAX_NR_ZONES];
75 unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz; 96 unsigned long zones_holes[MAX_NR_ZONES];
76 97
77 init_mm.start_code = (unsigned long)_text; 98 init_mm.start_code = (unsigned long)_text;
78 init_mm.end_code = (unsigned long)_etext; 99 init_mm.end_code = (unsigned long)_etext;
79 init_mm.end_data = (unsigned long)_edata; 100 init_mm.end_data = (unsigned long)_edata;
80 init_mm.brk = (unsigned long)_end; 101 init_mm.brk = (unsigned long)_end;
81 102
82 /*
83 * We do it here, so that memory is correctly instantiated
84 * even if "mem=xxx" cmline over-ride is given and/or
85 * DT has memory node. Each causes an update to @arc_mem_sz
86 * and we finally add memory one here
87 */
88 memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
89
90 /*------------- externs in mm need setting up ---------------*/
91
92 /* first page of system - kernel .vector starts here */ 103 /* first page of system - kernel .vector starts here */
93 min_low_pfn = ARCH_PFN_OFFSET; 104 min_low_pfn = ARCH_PFN_OFFSET;
94 105
95 /* Last usable page of low mem (no HIGHMEM yet for ARC port) */ 106 /* Last usable page of low mem */
96 max_low_pfn = max_pfn = PFN_DOWN(end_mem); 107 max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
97 108
98 max_mapnr = max_low_pfn - min_low_pfn; 109#ifdef CONFIG_HIGHMEM
110 min_high_pfn = PFN_DOWN(high_mem_start);
111 max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
112#endif
113
114 max_mapnr = max_pfn - min_low_pfn;
99 115
100 /*------------- reserve kernel image -----------------------*/ 116 /*------------- bootmem allocator setup -----------------------*/
101 memblock_reserve(CONFIG_LINUX_LINK_BASE, 117
102 __pa(_end) - CONFIG_LINUX_LINK_BASE); 118 /*
119 * seed the bootmem allocator after any DT memory node parsing or
120 * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
121 *
122 * Only low mem is added, otherwise we have crashes when allocating
123 * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
 124 * available memory, ending in highmem with a > 32-bit address. However
 125 * it then tries to memset it with a truncated 32-bit handle, causing
126 * the crash
127 */
128
129 memblock_add(low_mem_start, low_mem_sz);
130 memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
103 131
104#ifdef CONFIG_BLK_DEV_INITRD 132#ifdef CONFIG_BLK_DEV_INITRD
105 /*------------- reserve initrd image -----------------------*/
106 if (initrd_start) 133 if (initrd_start)
107 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); 134 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
108#endif 135#endif
109 136
110 memblock_dump_all(); 137 memblock_dump_all();
111 138
112 /*-------------- node setup --------------------------------*/ 139 /*----------------- node/zones setup --------------------------*/
113 memset(zones_size, 0, sizeof(zones_size)); 140 memset(zones_size, 0, sizeof(zones_size));
114 zones_size[ZONE_NORMAL] = max_mapnr; 141 memset(zones_holes, 0, sizeof(zones_holes));
142
143 zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
144 zones_holes[ZONE_NORMAL] = 0;
145
146#ifdef CONFIG_HIGHMEM
147 zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
148
149 /* This handles the peripheral address space hole */
150 zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
151#endif
115 152
116 /* 153 /*
117 * We can't use the helper free_area_init(zones[]) because it uses 154 * We can't use the helper free_area_init(zones[]) because it uses
@@ -122,9 +159,12 @@ void __init setup_arch_memory(void)
122 free_area_init_node(0, /* node-id */ 159 free_area_init_node(0, /* node-id */
123 zones_size, /* num pages per zone */ 160 zones_size, /* num pages per zone */
124 min_low_pfn, /* first pfn of node */ 161 min_low_pfn, /* first pfn of node */
125 NULL); /* NO holes */ 162 zones_holes); /* holes */
126 163
127 high_memory = (void *)end_mem; 164#ifdef CONFIG_HIGHMEM
165 high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
166 kmap_init();
167#endif
128} 168}
129 169
130/* 170/*
@@ -135,6 +175,14 @@ void __init setup_arch_memory(void)
135 */ 175 */
136void __init mem_init(void) 176void __init mem_init(void)
137{ 177{
178#ifdef CONFIG_HIGHMEM
179 unsigned long tmp;
180
181 reset_all_zones_managed_pages();
182 for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
183 free_highmem_page(pfn_to_page(tmp));
184#endif
185
138 free_all_bootmem(); 186 free_all_bootmem();
139 mem_init_print_info(NULL); 187 mem_init_print_info(NULL);
140} 188}
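The zone setup above sizes ZONE_NORMAL from low memory only, while ZONE_HIGHMEM spans from the end of low memory to the end of high memory, with the peripheral gap reported via zones_holes[]. A back-of-the-envelope sketch with made-up base/size values (the 0x8000_0000 low-mem base and 0x1_0000_0000 high-mem base are illustrative, not mandated):

    #include <stdio.h>

    #define PAGE_SHIFT  13
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long long low_start  = 0x80000000ULL,  low_sz  = 0x20000000ULL;
        unsigned long long high_start = 0x100000000ULL, high_sz = 0x40000000ULL;

        unsigned long min_low_pfn  = PFN_DOWN(low_start);
        unsigned long max_low_pfn  = PFN_DOWN(low_start + low_sz);
        unsigned long min_high_pfn = PFN_DOWN(high_start);
        unsigned long max_pfn      = PFN_DOWN(high_start + high_sz);

        unsigned long normal_pages = max_low_pfn - min_low_pfn;
        unsigned long highmem_span = max_pfn - max_low_pfn;
        unsigned long highmem_hole = min_high_pfn - max_low_pfn;

        printf("ZONE_NORMAL:  %lu pages\n", normal_pages);
        printf("ZONE_HIGHMEM: %lu pages spanned, %lu of them a hole\n",
               highmem_span, highmem_hole);
        return 0;
    }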
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 2c7ce8bb7475..0ee739846847 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -109,6 +109,10 @@ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
109static inline void __tlb_entry_erase(void) 109static inline void __tlb_entry_erase(void)
110{ 110{
111 write_aux_reg(ARC_REG_TLBPD1, 0); 111 write_aux_reg(ARC_REG_TLBPD1, 0);
112
113 if (is_pae40_enabled())
114 write_aux_reg(ARC_REG_TLBPD1HI, 0);
115
112 write_aux_reg(ARC_REG_TLBPD0, 0); 116 write_aux_reg(ARC_REG_TLBPD0, 0);
113 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); 117 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
114} 118}
@@ -182,7 +186,7 @@ static void utlb_invalidate(void)
182 186
183} 187}
184 188
185static void tlb_entry_insert(unsigned int pd0, unsigned int pd1) 189static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
186{ 190{
187 unsigned int idx; 191 unsigned int idx;
188 192
@@ -225,10 +229,14 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
225 write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry); 229 write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
226} 230}
227 231
228static void tlb_entry_insert(unsigned int pd0, unsigned int pd1) 232static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
229{ 233{
230 write_aux_reg(ARC_REG_TLBPD0, pd0); 234 write_aux_reg(ARC_REG_TLBPD0, pd0);
231 write_aux_reg(ARC_REG_TLBPD1, pd1); 235 write_aux_reg(ARC_REG_TLBPD1, pd1);
236
237 if (is_pae40_enabled())
238 write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
239
232 write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry); 240 write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
233} 241}
234 242
@@ -240,22 +248,39 @@ static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
240 248
241noinline void local_flush_tlb_all(void) 249noinline void local_flush_tlb_all(void)
242{ 250{
251 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
243 unsigned long flags; 252 unsigned long flags;
244 unsigned int entry; 253 unsigned int entry;
245 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; 254 int num_tlb = mmu->sets * mmu->ways;
246 255
247 local_irq_save(flags); 256 local_irq_save(flags);
248 257
249 /* Load PD0 and PD1 with template for a Blank Entry */ 258 /* Load PD0 and PD1 with template for a Blank Entry */
250 write_aux_reg(ARC_REG_TLBPD1, 0); 259 write_aux_reg(ARC_REG_TLBPD1, 0);
260
261 if (is_pae40_enabled())
262 write_aux_reg(ARC_REG_TLBPD1HI, 0);
263
251 write_aux_reg(ARC_REG_TLBPD0, 0); 264 write_aux_reg(ARC_REG_TLBPD0, 0);
252 265
253 for (entry = 0; entry < mmu->num_tlb; entry++) { 266 for (entry = 0; entry < num_tlb; entry++) {
254 /* write this entry to the TLB */ 267 /* write this entry to the TLB */
255 write_aux_reg(ARC_REG_TLBINDEX, entry); 268 write_aux_reg(ARC_REG_TLBINDEX, entry);
256 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); 269 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
257 } 270 }
258 271
272 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
273 const int stlb_idx = 0x800;
274
275 /* Blank sTLB entry */
276 write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
277
278 for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
279 write_aux_reg(ARC_REG_TLBINDEX, entry);
280 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
281 }
282 }
283
259 utlb_invalidate(); 284 utlb_invalidate();
260 285
261 local_irq_restore(flags); 286 local_irq_restore(flags);
@@ -409,6 +434,15 @@ static inline void ipi_flush_tlb_range(void *arg)
409 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 434 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
410} 435}
411 436
437#ifdef CONFIG_TRANSPARENT_HUGEPAGE
438static inline void ipi_flush_pmd_tlb_range(void *arg)
439{
440 struct tlb_args *ta = arg;
441
442 local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
443}
444#endif
445
412static inline void ipi_flush_tlb_kernel_range(void *arg) 446static inline void ipi_flush_tlb_kernel_range(void *arg)
413{ 447{
414 struct tlb_args *ta = (struct tlb_args *)arg; 448 struct tlb_args *ta = (struct tlb_args *)arg;
@@ -449,6 +483,20 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
449 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); 483 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
450} 484}
451 485
486#ifdef CONFIG_TRANSPARENT_HUGEPAGE
487void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
488 unsigned long end)
489{
490 struct tlb_args ta = {
491 .ta_vma = vma,
492 .ta_start = start,
493 .ta_end = end
494 };
495
496 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
497}
498#endif
499
452void flush_tlb_kernel_range(unsigned long start, unsigned long end) 500void flush_tlb_kernel_range(unsigned long start, unsigned long end)
453{ 501{
454 struct tlb_args ta = { 502 struct tlb_args ta = {
@@ -463,11 +511,12 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
463/* 511/*
464 * Routine to create a TLB entry 512 * Routine to create a TLB entry
465 */ 513 */
466void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 514void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
467{ 515{
468 unsigned long flags; 516 unsigned long flags;
469 unsigned int asid_or_sasid, rwx; 517 unsigned int asid_or_sasid, rwx;
470 unsigned long pd0, pd1; 518 unsigned long pd0;
519 pte_t pd1;
471 520
472 /* 521 /*
473 * create_tlb() assumes that current->mm == vma->mm, since 522 * create_tlb() assumes that current->mm == vma->mm, since
@@ -499,9 +548,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
499 548
500 local_irq_save(flags); 549 local_irq_save(flags);
501 550
502 tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address); 551 tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
503 552
504 address &= PAGE_MASK; 553 vaddr &= PAGE_MASK;
505 554
506 /* update this PTE credentials */ 555 /* update this PTE credentials */
507 pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED); 556 pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -511,7 +560,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
511 /* ASID for this task */ 560 /* ASID for this task */
512 asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff; 561 asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
513 562
514 pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0); 563 pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
515 564
516 /* 565 /*
517 * ARC MMU provides fully orthogonal access bits for K/U mode, 566 * ARC MMU provides fully orthogonal access bits for K/U mode,
@@ -547,7 +596,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
547 pte_t *ptep) 596 pte_t *ptep)
548{ 597{
549 unsigned long vaddr = vaddr_unaligned & PAGE_MASK; 598 unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
550 unsigned long paddr = pte_val(*ptep) & PAGE_MASK; 599 phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
551 struct page *page = pfn_to_page(pte_pfn(*ptep)); 600 struct page *page = pfn_to_page(pte_pfn(*ptep));
552 601
553 create_tlb(vma, vaddr, ptep); 602 create_tlb(vma, vaddr, ptep);
@@ -580,6 +629,95 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
580 } 629 }
581} 630}
582 631
632#ifdef CONFIG_TRANSPARENT_HUGEPAGE
633
634/*
 635 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 636 * support.
 637 *
 638 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 639 * new bit "SZ" in the TLB page descriptor to distinguish between them.
 640 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 641 * the RTL is built.
 642 *
 643 * The exact THP size a Linux configuration will support is a function of:
644 * - MMU page size (typical 8K, RTL fixed)
645 * - software page walker address split between PGD:PTE:PFN (typical
646 * 11:8:13, but can be changed with 1 line)
647 * So for above default, THP size supported is 8K * (2^8) = 2M
648 *
649 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
650 * reduces to 1 level (as PTE is folded into PGD and canonically referred
651 * to as PMD).
652 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
653 */
654
655void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
656 pmd_t *pmd)
657{
658 pte_t pte = __pte(pmd_val(*pmd));
659 update_mmu_cache(vma, addr, &pte);
660}
661
662void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
663 pgtable_t pgtable)
664{
665 struct list_head *lh = (struct list_head *) pgtable;
666
667 assert_spin_locked(&mm->page_table_lock);
668
669 /* FIFO */
670 if (!pmd_huge_pte(mm, pmdp))
671 INIT_LIST_HEAD(lh);
672 else
673 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
674 pmd_huge_pte(mm, pmdp) = pgtable;
675}
676
677pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
678{
679 struct list_head *lh;
680 pgtable_t pgtable;
681
682 assert_spin_locked(&mm->page_table_lock);
683
684 pgtable = pmd_huge_pte(mm, pmdp);
685 lh = (struct list_head *) pgtable;
686 if (list_empty(lh))
687 pmd_huge_pte(mm, pmdp) = NULL;
688 else {
689 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
690 list_del(lh);
691 }
692
693 pte_val(pgtable[0]) = 0;
694 pte_val(pgtable[1]) = 0;
695
696 return pgtable;
697}
698
699void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
700 unsigned long end)
701{
702 unsigned int cpu;
703 unsigned long flags;
704
705 local_irq_save(flags);
706
707 cpu = smp_processor_id();
708
709 if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
710 unsigned int asid = hw_pid(vma->vm_mm, cpu);
711
712 /* No need to loop here: this will always be for 1 Huge Page */
713 tlb_entry_erase(start | _PAGE_HW_SZ | asid);
714 }
715
716 local_irq_restore(flags);
717}
718
719#endif
720
583/* Read the Cache Build Configuration Registers, Decode them and save into 721
584 * the cpuinfo structure for later use. 722 * the cpuinfo structure for later use.
585 * No Validation is done here, simply read/convert the BCRs 723 * No Validation is done here, simply read/convert the BCRs
@@ -598,10 +736,10 @@ void read_decode_mmu_bcr(void)
598 736
599 struct bcr_mmu_3 { 737 struct bcr_mmu_3 {
600#ifdef CONFIG_CPU_BIG_ENDIAN 738#ifdef CONFIG_CPU_BIG_ENDIAN
601 unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4, 739 unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
602 u_itlb:4, u_dtlb:4; 740 u_itlb:4, u_dtlb:4;
603#else 741#else
604 unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4, 742 unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
605 ways:4, ver:8; 743 ways:4, ver:8;
606#endif 744#endif
607 } *mmu3; 745 } *mmu3;
@@ -622,7 +760,7 @@ void read_decode_mmu_bcr(void)
622 760
623 if (mmu->ver <= 2) { 761 if (mmu->ver <= 2) {
624 mmu2 = (struct bcr_mmu_1_2 *)&tmp; 762 mmu2 = (struct bcr_mmu_1_2 *)&tmp;
625 mmu->pg_sz_k = TO_KB(PAGE_SIZE); 763 mmu->pg_sz_k = TO_KB(0x2000);
626 mmu->sets = 1 << mmu2->sets; 764 mmu->sets = 1 << mmu2->sets;
627 mmu->ways = 1 << mmu2->ways; 765 mmu->ways = 1 << mmu2->ways;
628 mmu->u_dtlb = mmu2->u_dtlb; 766 mmu->u_dtlb = mmu2->u_dtlb;
@@ -634,6 +772,7 @@ void read_decode_mmu_bcr(void)
634 mmu->ways = 1 << mmu3->ways; 772 mmu->ways = 1 << mmu3->ways;
635 mmu->u_dtlb = mmu3->u_dtlb; 773 mmu->u_dtlb = mmu3->u_dtlb;
636 mmu->u_itlb = mmu3->u_itlb; 774 mmu->u_itlb = mmu3->u_itlb;
775 mmu->sasid = mmu3->sasid;
637 } else { 776 } else {
638 mmu4 = (struct bcr_mmu_4 *)&tmp; 777 mmu4 = (struct bcr_mmu_4 *)&tmp;
639 mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); 778 mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
@@ -642,9 +781,9 @@ void read_decode_mmu_bcr(void)
642 mmu->ways = mmu4->n_ways * 2; 781 mmu->ways = mmu4->n_ways * 2;
643 mmu->u_dtlb = mmu4->u_dtlb * 4; 782 mmu->u_dtlb = mmu4->u_dtlb * 4;
644 mmu->u_itlb = mmu4->u_itlb * 4; 783 mmu->u_itlb = mmu4->u_itlb * 4;
784 mmu->sasid = mmu4->sasid;
785 mmu->pae = mmu4->pae;
645 } 786 }
646
647 mmu->num_tlb = mmu->sets * mmu->ways;
648} 787}
649 788
650char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) 789char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
@@ -655,14 +794,15 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
655 794
656 if (p_mmu->s_pg_sz_m) 795 if (p_mmu->s_pg_sz_m)
657 scnprintf(super_pg, 64, "%dM Super Page%s, ", 796 scnprintf(super_pg, 64, "%dM Super Page%s, ",
658 p_mmu->s_pg_sz_m, " (not used)"); 797 p_mmu->s_pg_sz_m,
798 IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
659 799
660 n += scnprintf(buf + n, len - n, 800 n += scnprintf(buf + n, len - n,
661 "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n", 801 "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
662 p_mmu->ver, p_mmu->pg_sz_k, super_pg, 802 p_mmu->ver, p_mmu->pg_sz_k, super_pg,
663 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, 803 p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
664 p_mmu->u_dtlb, p_mmu->u_itlb, 804 p_mmu->u_dtlb, p_mmu->u_itlb,
665 IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : ""); 805 IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
666 806
667 return buf; 807 return buf;
668} 808}
@@ -690,6 +830,14 @@ void arc_mmu_init(void)
690 if (mmu->pg_sz_k != TO_KB(PAGE_SIZE)) 830 if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
691 panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE)); 831 panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
692 832
833 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
834 mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
835 panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
836 (unsigned long)TO_MB(HPAGE_PMD_SIZE));
837
838 if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
839 panic("Hardware doesn't support PAE40\n");
840
693 /* Enable the MMU */ 841 /* Enable the MMU */
694 write_aux_reg(ARC_REG_PID, MMU_ENABLE); 842 write_aux_reg(ARC_REG_PID, MMU_ENABLE);
695 843
@@ -725,15 +873,15 @@ void arc_mmu_init(void)
725 * the duplicate one. 873 * the duplicate one.
 727 * -Knob to be verbose about it. (TODO: hook them up to debugfs) 875
727 */ 875 */
728volatile int dup_pd_verbose = 1;/* Be slient abt it or complain (default) */ 876volatile int dup_pd_silent; /* Be silent about it or complain (default) */
729 877
730void do_tlb_overlap_fault(unsigned long cause, unsigned long address, 878void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
731 struct pt_regs *regs) 879 struct pt_regs *regs)
732{ 880{
733 int set, way, n;
734 unsigned long flags, is_valid;
735 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; 881 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
736 unsigned int pd0[mmu->ways], pd1[mmu->ways]; 882 unsigned int pd0[mmu->ways];
883 unsigned long flags;
884 int set;
737 885
738 local_irq_save(flags); 886 local_irq_save(flags);
739 887
@@ -743,14 +891,16 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
743 /* loop thru all sets of TLB */ 891 /* loop thru all sets of TLB */
744 for (set = 0; set < mmu->sets; set++) { 892 for (set = 0; set < mmu->sets; set++) {
745 893
894 int is_valid, way;
895
746 /* read out all the ways of current set */ 896 /* read out all the ways of current set */
747 for (way = 0, is_valid = 0; way < mmu->ways; way++) { 897 for (way = 0, is_valid = 0; way < mmu->ways; way++) {
748 write_aux_reg(ARC_REG_TLBINDEX, 898 write_aux_reg(ARC_REG_TLBINDEX,
749 SET_WAY_TO_IDX(mmu, set, way)); 899 SET_WAY_TO_IDX(mmu, set, way));
750 write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); 900 write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
751 pd0[way] = read_aux_reg(ARC_REG_TLBPD0); 901 pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
752 pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
753 is_valid |= pd0[way] & _PAGE_PRESENT; 902 is_valid |= pd0[way] & _PAGE_PRESENT;
903 pd0[way] &= PAGE_MASK;
754 } 904 }
755 905
756 /* If all the WAYS in SET are empty, skip to next SET */ 906 /* If all the WAYS in SET are empty, skip to next SET */
@@ -759,30 +909,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
759 909
760 /* Scan the set for duplicate ways: needs a nested loop */ 910 /* Scan the set for duplicate ways: needs a nested loop */
761 for (way = 0; way < mmu->ways - 1; way++) { 911 for (way = 0; way < mmu->ways - 1; way++) {
912
913 int n;
914
762 if (!pd0[way]) 915 if (!pd0[way])
763 continue; 916 continue;
764 917
765 for (n = way + 1; n < mmu->ways; n++) { 918 for (n = way + 1; n < mmu->ways; n++) {
766 if ((pd0[way] & PAGE_MASK) == 919 if (pd0[way] != pd0[n])
767 (pd0[n] & PAGE_MASK)) { 920 continue;
768 921
769 if (dup_pd_verbose) { 922 if (!dup_pd_silent)
770 pr_info("Duplicate PD's @" 923 pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
771 "[%d:%d]/[%d:%d]\n", 924 pd0[way], set, way, n);
772 set, way, set, n); 925
773 pr_info("TLBPD0[%u]: %08x\n", 926 /*
774 way, pd0[way]); 927 * clear entry @way and not @n.
775 } 928 * This is critical to our optimised loop
776 929 */
777 /* 930 pd0[way] = 0;
778 * clear entry @way and not @n. This is 931 write_aux_reg(ARC_REG_TLBINDEX,
779 * critical to our optimised loop
780 */
781 pd0[way] = pd1[way] = 0;
782 write_aux_reg(ARC_REG_TLBINDEX,
783 SET_WAY_TO_IDX(mmu, set, way)); 932 SET_WAY_TO_IDX(mmu, set, way));
784 __tlb_entry_erase(); 933 __tlb_entry_erase();
785 }
786 } 934 }
787 } 935 }
788 } 936 }
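The comment above already gives the arithmetic: with the default 11:8:13 PGD:PTE:PFN split and 8K pages, one PMD (== PGD) entry covers 2^8 base pages, i.e. a 2M super page. Spelled out as a trivial program:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pfn_bits = 13;              /* 8K base page */
        unsigned int pte_bits = 8;               /* 256 PTEs per table */
        unsigned long page_size = 1UL << pfn_bits;
        unsigned long thp_size  = page_size << pte_bits;

        /* prints: base page 8K, THP/super page 2M */
        printf("base page %luK, THP/super page %luM\n",
               page_size >> 10, thp_size >> 20);
        return 0;
    }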
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index f6f4c3cb505d..63860adc4814 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -205,20 +205,38 @@ ex_saved_reg1:
205#endif 205#endif
206 206
207 lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD 207 lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
208 ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr 208 ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr
209 and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags 209 tst r3, r3
210 ; contains Ptr to Page Table 210 bz do_slow_path_pf ; if no Page Table, do page fault
211 bz.d do_slow_path_pf ; if no Page Table, do page fault 211
212#ifdef CONFIG_TRANSPARENT_HUGEPAGE
213 and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp)
214 add2.nz r1, r1, r0
215 bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk
216 mov.nz r0, r3
217
218#endif
219 and r1, r3, PAGE_MASK
212 220
213 ; Get the PTE entry: The idea is 221 ; Get the PTE entry: The idea is
214 ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr 222 ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
215 ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index 223 ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
216 ; (3) z = pgtbl[y] 224 ; (3) z = (pgtbl + y * 4)
217 ; To avoid the multiply by in end, we do the -2, <<2 below 225
226#ifdef CONFIG_ARC_HAS_PAE40
227#define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */
228#else
229#define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */
230#endif
231
232 ; multiply in step (3) above avoided by shifting lesser in step (1)
233 lsr r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
234 and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
235 ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40)
236 ; r1: PTE ptr
237
2382:
218 239
219 lsr r0, r2, (PAGE_SHIFT - 2)
220 and r0, r0, ( (PTRS_PER_PTE - 1) << 2)
221 ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr
222#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 240#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
223 and.f 0, r0, _PAGE_PRESENT 241 and.f 0, r0, _PAGE_PRESENT
224 bz 1f 242 bz 1f
@@ -233,18 +251,23 @@ ex_saved_reg1:
233;----------------------------------------------------------------- 251;-----------------------------------------------------------------
234; Convert Linux PTE entry into TLB entry 252; Convert Linux PTE entry into TLB entry
235; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu 253; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
254; (for PAE40, two-words PTE, while three-word TLB Entry [PD0:PD1:PD1HI])
236; IN: r0 = PTE, r1 = ptr to PTE 255; IN: r0 = PTE, r1 = ptr to PTE
237 256
238.macro CONV_PTE_TO_TLB 257.macro CONV_PTE_TO_TLB
239 and r3, r0, PTE_BITS_RWX ; r w x 258 and r3, r0, PTE_BITS_RWX ; r w x
240 lsl r2, r3, 3 ; r w x 0 0 0 (GLOBAL, kernel only) 259 lsl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only)
241 and.f 0, r0, _PAGE_GLOBAL 260 and.f 0, r0, _PAGE_GLOBAL
242 or.z r2, r2, r3 ; r w x r w x (!GLOBAL, user page) 261 or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
243 262
244 and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE 263 and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
245 or r3, r3, r2 264 or r3, r3, r2
246 265
247 sr r3, [ARC_REG_TLBPD1] ; these go in PD1 266 sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
267#ifdef CONFIG_ARC_HAS_PAE40
268 ld r3, [r1, 4] ; paddr[39..32]
269 sr r3, [ARC_REG_TLBPD1HI]
270#endif
248 271
249 and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb 272 and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
250 273
@@ -365,7 +388,7 @@ ENTRY(EV_TLBMissD)
365 lr r3, [ecr] 388 lr r3, [ecr]
366 or r0, r0, _PAGE_ACCESSED ; Accessed bit always 389 or r0, r0, _PAGE_ACCESSED ; Accessed bit always
367 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? 390 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
368 or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well 391 or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well
369 st_s r0, [r1] ; Write back PTE 392 st_s r0, [r1] ; Write back PTE
370 393
371 CONV_PTE_TO_TLB 394 CONV_PTE_TO_TLB
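The PTE_SIZE_LOG trick in the tlbex.S hunk above folds the multiply by sizeof(pte) into the page-offset shift: shifting the faulting address right by (PAGE_SHIFT - PTE_SIZE_LOG) and masking with (PTRS_PER_PTE - 1) << PTE_SIZE_LOG yields the byte offset into the page table directly. A user-space check of that identity, using the typical ARC values for PAGE_SHIFT and PTRS_PER_PTE:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT   13
    #define PTRS_PER_PTE 256

    /* byte offset of the PTE slot, with the scaling folded into the shift */
    static unsigned long pte_byte_off(unsigned long vaddr, unsigned int pte_size_log)
    {
        return (vaddr >> (PAGE_SHIFT - pte_size_log)) &
               ((PTRS_PER_PTE - 1) << pte_size_log);
    }

    int main(void)
    {
        unsigned long vaddr = 0x12345678UL;
        unsigned long idx = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

        assert(pte_byte_off(vaddr, 2) == idx * 4);  /* !PAE40: 4-byte PTE */
        assert(pte_byte_off(vaddr, 3) == idx * 8);  /*  PAE40: 8-byte PTE */
        printf("pte index %lu, byte offsets %lu / %lu\n", idx, idx * 4, idx * 8);
        return 0;
    }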
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 0a77b19e1df8..1b0f0f458a2b 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -455,11 +455,6 @@ static void __init axs103_early_init(void)
455 axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card"); 455 axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card");
456 456
457 axs10x_early_init(); 457 axs10x_early_init();
458
459#ifdef CONFIG_ARC_MCIP
460 /* No Hardware init, but filling the smp ops callbacks */
461 mcip_init_early_smp();
462#endif
463} 458}
464#endif 459#endif
465 460
@@ -487,9 +482,6 @@ static const char *axs103_compat[] __initconst = {
487MACHINE_START(AXS103, "axs103") 482MACHINE_START(AXS103, "axs103")
488 .dt_compat = axs103_compat, 483 .dt_compat = axs103_compat,
489 .init_early = axs103_early_init, 484 .init_early = axs103_early_init,
490#ifdef CONFIG_ARC_MCIP
491 .init_smp = mcip_init_smp,
492#endif
493MACHINE_END 485MACHINE_END
494 486
495/* 487/*
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c
index d9e35b4a2f08..dde692812bc1 100644
--- a/arch/arc/plat-sim/platform.c
+++ b/arch/arc/plat-sim/platform.c
@@ -30,8 +30,4 @@ static const char *simulation_compat[] __initconst = {
30 30
31MACHINE_START(SIMULATION, "simulation") 31MACHINE_START(SIMULATION, "simulation")
32 .dt_compat = simulation_compat, 32 .dt_compat = simulation_compat,
33#ifdef CONFIG_ARC_MCIP
34 .init_early = mcip_init_early_smp,
35 .init_smp = mcip_init_smp,
36#endif
37MACHINE_END 33MACHINE_END
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 72ad724c67ae..f1ed1109f488 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -645,6 +645,7 @@ config ARCH_SHMOBILE_LEGACY
645 645
646config ARCH_RPC 646config ARCH_RPC
647 bool "RiscPC" 647 bool "RiscPC"
648 depends on MMU
648 select ARCH_ACORN 649 select ARCH_ACORN
649 select ARCH_MAY_HAVE_PC_FDC 650 select ARCH_MAY_HAVE_PC_FDC
650 select ARCH_SPARSEMEM_ENABLE 651 select ARCH_SPARSEMEM_ENABLE
@@ -819,6 +820,7 @@ config ARCH_VIRT
819 bool "Dummy Virtual Machine" if ARCH_MULTI_V7 820 bool "Dummy Virtual Machine" if ARCH_MULTI_V7
820 select ARM_AMBA 821 select ARM_AMBA
821 select ARM_GIC 822 select ARM_GIC
823 select ARM_GIC_V3
822 select ARM_PSCI 824 select ARM_PSCI
823 select HAVE_ARM_ARCH_TIMER 825 select HAVE_ARM_ARCH_TIMER
824 826
@@ -1410,7 +1412,6 @@ config HAVE_ARM_ARCH_TIMER
1410 1412
1411config HAVE_ARM_TWD 1413config HAVE_ARM_TWD
1412 bool 1414 bool
1413 depends on SMP
1414 select CLKSRC_OF if OF 1415 select CLKSRC_OF if OF
1415 help 1416 help
1416 This options enables support for the ARM timer and watchdog unit 1417 This options enables support for the ARM timer and watchdog unit
@@ -1470,6 +1471,8 @@ choice
1470 1471
1471 config VMSPLIT_3G 1472 config VMSPLIT_3G
1472 bool "3G/1G user/kernel split" 1473 bool "3G/1G user/kernel split"
1474 config VMSPLIT_3G_OPT
1475 bool "3G/1G user/kernel split (for full 1G low memory)"
1473 config VMSPLIT_2G 1476 config VMSPLIT_2G
1474 bool "2G/2G user/kernel split" 1477 bool "2G/2G user/kernel split"
1475 config VMSPLIT_1G 1478 config VMSPLIT_1G
@@ -1481,6 +1484,7 @@ config PAGE_OFFSET
1481 default PHYS_OFFSET if !MMU 1484 default PHYS_OFFSET if !MMU
1482 default 0x40000000 if VMSPLIT_1G 1485 default 0x40000000 if VMSPLIT_1G
1483 default 0x80000000 if VMSPLIT_2G 1486 default 0x80000000 if VMSPLIT_2G
1487 default 0xB0000000 if VMSPLIT_3G_OPT
1484 default 0xC0000000 1488 default 0xC0000000
1485 1489
1486config NR_CPUS 1490config NR_CPUS
@@ -1695,8 +1699,9 @@ config HIGHMEM
1695 If unsure, say n. 1699 If unsure, say n.
1696 1700
1697config HIGHPTE 1701config HIGHPTE
1698 bool "Allocate 2nd-level pagetables from highmem" 1702 bool "Allocate 2nd-level pagetables from highmem" if EXPERT
1699 depends on HIGHMEM 1703 depends on HIGHMEM
1704 default y
1700 help 1705 help
1701 The VM uses one page of physical memory for each page table. 1706 The VM uses one page of physical memory for each page table.
1702 For systems with a lot of processes, this can use a lot of 1707 For systems with a lot of processes, this can use a lot of
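VMSPLIT_3G_OPT keeps the 3G/1G user/kernel split but lowers PAGE_OFFSET from 0xC0000000 to 0xB0000000 so that a full 1 GiB of RAM fits in the linear lowmem map. Rough arithmetic only; the lowmem ceiling used below is an assumption, the real limit depends on the platform's vmalloc/fixmap layout:

    #include <stdio.h>

    int main(void)
    {
        /* assumed top of the linear map; real value depends on vmalloc size etc. */
        unsigned long lowmem_top = 0xF0000000UL;
        unsigned long splits[] = { 0xC0000000UL, 0xB0000000UL }; /* VMSPLIT_3G, _3G_OPT */

        for (int i = 0; i < 2; i++)
            printf("PAGE_OFFSET 0x%08lx -> up to %lu MiB lowmem\n",
                   splits[i], (lowmem_top - splits[i]) >> 20);   /* 768 vs 1024 */
        return 0;
    }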
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 568adf5efde0..d55e3ea89fda 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -402,11 +402,12 @@
402 /* SMPS9 unused */ 402 /* SMPS9 unused */
403 403
404 ldo1_reg: ldo1 { 404 ldo1_reg: ldo1 {
405 /* VDD_SD */ 405 /* VDD_SD / VDDSHV8 */
406 regulator-name = "ldo1"; 406 regulator-name = "ldo1";
407 regulator-min-microvolt = <1800000>; 407 regulator-min-microvolt = <1800000>;
408 regulator-max-microvolt = <3300000>; 408 regulator-max-microvolt = <3300000>;
409 regulator-boot-on; 409 regulator-boot-on;
410 regulator-always-on;
410 }; 411 };
411 412
412 ldo2_reg: ldo2 { 413 ldo2_reg: ldo2 {
diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
index 89f5a95954ed..4047621b137e 100644
--- a/arch/arm/boot/dts/armada-385-db-ap.dts
+++ b/arch/arm/boot/dts/armada-385-db-ap.dts
@@ -46,7 +46,7 @@
46 46
47/ { 47/ {
48 model = "Marvell Armada 385 Access Point Development Board"; 48 model = "Marvell Armada 385 Access Point Development Board";
49 compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x"; 49 compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380";
50 50
51 chosen { 51 chosen {
52 stdout-path = "serial1:115200n8"; 52 stdout-path = "serial1:115200n8";
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 63a48490e2f9..d4dbd28d348c 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -152,7 +152,7 @@
152 }; 152 };
153 153
154 usb_phy2: phy@a2f400 { 154 usb_phy2: phy@a2f400 {
155 compatible = "marvell,berlin2-usb-phy"; 155 compatible = "marvell,berlin2cd-usb-phy";
156 reg = <0xa2f400 0x128>; 156 reg = <0xa2f400 0x128>;
157 #phy-cells = <0>; 157 #phy-cells = <0>;
158 resets = <&chip_rst 0x104 14>; 158 resets = <&chip_rst 0x104 14>;
@@ -170,7 +170,7 @@
170 }; 170 };
171 171
172 usb_phy0: phy@b74000 { 172 usb_phy0: phy@b74000 {
173 compatible = "marvell,berlin2-usb-phy"; 173 compatible = "marvell,berlin2cd-usb-phy";
174 reg = <0xb74000 0x128>; 174 reg = <0xb74000 0x128>;
175 #phy-cells = <0>; 175 #phy-cells = <0>;
176 resets = <&chip_rst 0x104 12>; 176 resets = <&chip_rst 0x104 12>;
@@ -178,7 +178,7 @@
178 }; 178 };
179 179
180 usb_phy1: phy@b78000 { 180 usb_phy1: phy@b78000 {
181 compatible = "marvell,berlin2-usb-phy"; 181 compatible = "marvell,berlin2cd-usb-phy";
182 reg = <0xb78000 0x128>; 182 reg = <0xb78000 0x128>;
183 #phy-cells = <0>; 183 #phy-cells = <0>;
184 resets = <&chip_rst 0x104 13>; 184 resets = <&chip_rst 0x104 13>;
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index 955c24ee4a8c..8c24975e8f9d 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -35,28 +35,28 @@
35 35
36 button@1 { 36 button@1 {
37 debounce_interval = <50>; 37 debounce_interval = <50>;
38 wakeup = <1>; 38 wakeup-source;
39 label = "DSW2-1"; 39 label = "DSW2-1";
40 linux,code = <KEY_1>; 40 linux,code = <KEY_1>;
41 gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>; 41 gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
42 }; 42 };
43 button@2 { 43 button@2 {
44 debounce_interval = <50>; 44 debounce_interval = <50>;
45 wakeup = <1>; 45 wakeup-source;
46 label = "DSW2-2"; 46 label = "DSW2-2";
47 linux,code = <KEY_2>; 47 linux,code = <KEY_2>;
48 gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>; 48 gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
49 }; 49 };
50 button@3 { 50 button@3 {
51 debounce_interval = <50>; 51 debounce_interval = <50>;
52 wakeup = <1>; 52 wakeup-source;
53 label = "DSW2-3"; 53 label = "DSW2-3";
54 linux,code = <KEY_3>; 54 linux,code = <KEY_3>;
55 gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>; 55 gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
56 }; 56 };
57 button@4 { 57 button@4 {
58 debounce_interval = <50>; 58 debounce_interval = <50>;
59 wakeup = <1>; 59 wakeup-source;
60 label = "DSW2-4"; 60 label = "DSW2-4";
61 linux,code = <KEY_4>; 61 linux,code = <KEY_4>;
62 gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>; 62 gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index 8f4d76c5e11c..1b95da79293c 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -915,6 +915,11 @@
915 }; 915 };
916}; 916};
917 917
918&pmu_system_controller {
919 assigned-clocks = <&pmu_system_controller 0>;
920 assigned-clock-parents = <&clock CLK_FIN_PLL>;
921};
922
918&rtc { 923&rtc {
919 status = "okay"; 924 status = "okay";
920 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>; 925 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 7d5b386b5ae6..8f40c7e549bd 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -878,6 +878,11 @@
878 }; 878 };
879}; 879};
880 880
881&pmu_system_controller {
882 assigned-clocks = <&pmu_system_controller 0>;
883 assigned-clock-parents = <&clock CLK_FIN_PLL>;
884};
885
881&rtc { 886&rtc {
882 status = "okay"; 887 status = "okay";
883 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>; 888 clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index b738ce0f9d9b..6e444bb873f9 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -588,10 +588,10 @@
588 status = "disabled"; 588 status = "disabled";
589 }; 589 };
590 590
591 uart2: serial@30870000 { 591 uart2: serial@30890000 {
592 compatible = "fsl,imx7d-uart", 592 compatible = "fsl,imx7d-uart",
593 "fsl,imx6q-uart"; 593 "fsl,imx6q-uart";
594 reg = <0x30870000 0x10000>; 594 reg = <0x30890000 0x10000>;
595 interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; 595 interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
596 clocks = <&clks IMX7D_UART2_ROOT_CLK>, 596 clocks = <&clks IMX7D_UART2_ROOT_CLK>,
597 <&clks IMX7D_UART2_ROOT_CLK>; 597 <&clks IMX7D_UART2_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/kirkwood-net5big.dts b/arch/arm/boot/dts/kirkwood-net5big.dts
index 36155b749d9f..d2d44df9c8c0 100644
--- a/arch/arm/boot/dts/kirkwood-net5big.dts
+++ b/arch/arm/boot/dts/kirkwood-net5big.dts
@@ -86,6 +86,66 @@
86 clock-frequency = <32768>; 86 clock-frequency = <32768>;
87 }; 87 };
88 }; 88 };
89
90 netxbig-leds {
91 blue-sata2 {
92 label = "netxbig:blue:sata2";
93 mode-addr = <5>;
94 mode-val = <NETXBIG_LED_OFF 0
95 NETXBIG_LED_ON 7
96 NETXBIG_LED_SATA 1
97 NETXBIG_LED_TIMER1 3>;
98 bright-addr = <2>;
99 max-brightness = <7>;
100 };
101 red-sata2 {
102 label = "netxbig:red:sata2";
103 mode-addr = <5>;
104 mode-val = <NETXBIG_LED_OFF 0
105 NETXBIG_LED_ON 2
106 NETXBIG_LED_TIMER1 4>;
107 bright-addr = <2>;
108 max-brightness = <7>;
109 };
110 blue-sata3 {
111 label = "netxbig:blue:sata3";
112 mode-addr = <6>;
113 mode-val = <NETXBIG_LED_OFF 0
114 NETXBIG_LED_ON 7
115 NETXBIG_LED_SATA 1
116 NETXBIG_LED_TIMER1 3>;
117 bright-addr = <2>;
118 max-brightness = <7>;
119 };
120 red-sata3 {
121 label = "netxbig:red:sata3";
122 mode-addr = <6>;
123 mode-val = <NETXBIG_LED_OFF 0
124 NETXBIG_LED_ON 2
125 NETXBIG_LED_TIMER1 4>;
126 bright-addr = <2>;
127 max-brightness = <7>;
128 };
129 blue-sata4 {
130 label = "netxbig:blue:sata4";
131 mode-addr = <7>;
132 mode-val = <NETXBIG_LED_OFF 0
133 NETXBIG_LED_ON 7
134 NETXBIG_LED_SATA 1
135 NETXBIG_LED_TIMER1 3>;
136 bright-addr = <2>;
137 max-brightness = <7>;
138 };
139 red-sata4 {
140 label = "netxbig:red:sata4";
141 mode-addr = <7>;
142 mode-val = <NETXBIG_LED_OFF 0
143 NETXBIG_LED_ON 2
144 NETXBIG_LED_TIMER1 4>;
145 bright-addr = <2>;
146 max-brightness = <7>;
147 };
148 };
89}; 149};
90 150
91&mdio { 151&mdio {
diff --git a/arch/arm/boot/dts/kirkwood-netxbig.dtsi b/arch/arm/boot/dts/kirkwood-netxbig.dtsi
index 1508b12147df..62515a8b99b9 100644
--- a/arch/arm/boot/dts/kirkwood-netxbig.dtsi
+++ b/arch/arm/boot/dts/kirkwood-netxbig.dtsi
@@ -13,6 +13,7 @@
13 * warranty of any kind, whether express or implied. 13 * warranty of any kind, whether express or implied.
14*/ 14*/
15 15
16#include <dt-bindings/leds/leds-netxbig.h>
16#include "kirkwood.dtsi" 17#include "kirkwood.dtsi"
17#include "kirkwood-6281.dtsi" 18#include "kirkwood-6281.dtsi"
18 19
@@ -105,6 +106,85 @@
105 gpio = <&gpio0 16 GPIO_ACTIVE_HIGH>; 106 gpio = <&gpio0 16 GPIO_ACTIVE_HIGH>;
106 }; 107 };
107 }; 108 };
109
110 netxbig_gpio_ext: netxbig-gpio-ext {
111 compatible = "lacie,netxbig-gpio-ext";
112
113 addr-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH
114 &gpio1 16 GPIO_ACTIVE_HIGH
115 &gpio1 17 GPIO_ACTIVE_HIGH>;
116 data-gpios = <&gpio1 12 GPIO_ACTIVE_HIGH
117 &gpio1 13 GPIO_ACTIVE_HIGH
118 &gpio1 14 GPIO_ACTIVE_HIGH>;
119 enable-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
120 };
121
122 netxbig-leds {
123 compatible = "lacie,netxbig-leds";
124
125 gpio-ext = <&netxbig_gpio_ext>;
126
127 timers = <NETXBIG_LED_TIMER1 500 500
128 NETXBIG_LED_TIMER2 500 1000>;
129
130 blue-power {
131 label = "netxbig:blue:power";
132 mode-addr = <0>;
133 mode-val = <NETXBIG_LED_OFF 0
134 NETXBIG_LED_ON 1
135 NETXBIG_LED_TIMER1 3
136 NETXBIG_LED_TIMER2 7>;
137 bright-addr = <1>;
138 max-brightness = <7>;
139 };
140 red-power {
141 label = "netxbig:red:power";
142 mode-addr = <0>;
143 mode-val = <NETXBIG_LED_OFF 0
144 NETXBIG_LED_ON 2
145 NETXBIG_LED_TIMER1 4>;
146 bright-addr = <1>;
147 max-brightness = <7>;
148 };
149 blue-sata0 {
150 label = "netxbig:blue:sata0";
151 mode-addr = <3>;
152 mode-val = <NETXBIG_LED_OFF 0
153 NETXBIG_LED_ON 7
154 NETXBIG_LED_SATA 1
155 NETXBIG_LED_TIMER1 3>;
156 bright-addr = <2>;
157 max-brightness = <7>;
158 };
159 red-sata0 {
160 label = "netxbig:red:sata0";
161 mode-addr = <3>;
162 mode-val = <NETXBIG_LED_OFF 0
163 NETXBIG_LED_ON 2
164 NETXBIG_LED_TIMER1 4>;
165 bright-addr = <2>;
166 max-brightness = <7>;
167 };
168 blue-sata1 {
169 label = "netxbig:blue:sata1";
170 mode-addr = <4>;
171 mode-val = <NETXBIG_LED_OFF 0
172 NETXBIG_LED_ON 7
173 NETXBIG_LED_SATA 1
174 NETXBIG_LED_TIMER1 3>;
175 bright-addr = <2>;
176 max-brightness = <7>;
177 };
178 red-sata1 {
179 label = "netxbig:red:sata1";
180 mode-addr = <4>;
181 mode-val = <NETXBIG_LED_OFF 0
182 NETXBIG_LED_ON 2
183 NETXBIG_LED_TIMER1 4>;
184 bright-addr = <2>;
185 max-brightness = <7>;
186 };
187 };
108}; 188};
109 189
110&mdio { 190&mdio {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 91146c318798..5b0430041ec6 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -12,7 +12,7 @@
12 12
13/ { 13/ {
14 model = "LogicPD Zoom DM3730 Torpedo Development Kit"; 14 model = "LogicPD Zoom DM3730 Torpedo Development Kit";
15 compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap36xx"; 15 compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3";
16 16
17 gpio_keys { 17 gpio_keys {
18 compatible = "gpio-keys"; 18 compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 548441384d2a..8c77c87660cd 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -67,7 +67,7 @@
67 67
68 timer@c1109940 { 68 timer@c1109940 {
69 compatible = "amlogic,meson6-timer"; 69 compatible = "amlogic,meson6-timer";
70 reg = <0xc1109940 0x14>; 70 reg = <0xc1109940 0x18>;
71 interrupts = <0 10 1>; 71 interrupts = <0 10 1>;
72 }; 72 };
73 73
@@ -80,36 +80,37 @@
80 wdt: watchdog@c1109900 { 80 wdt: watchdog@c1109900 {
81 compatible = "amlogic,meson6-wdt"; 81 compatible = "amlogic,meson6-wdt";
82 reg = <0xc1109900 0x8>; 82 reg = <0xc1109900 0x8>;
83 interrupts = <0 0 1>;
83 }; 84 };
84 85
85 uart_AO: serial@c81004c0 { 86 uart_AO: serial@c81004c0 {
86 compatible = "amlogic,meson-uart"; 87 compatible = "amlogic,meson-uart";
87 reg = <0xc81004c0 0x14>; 88 reg = <0xc81004c0 0x18>;
88 interrupts = <0 90 1>; 89 interrupts = <0 90 1>;
89 clocks = <&clk81>; 90 clocks = <&clk81>;
90 status = "disabled"; 91 status = "disabled";
91 }; 92 };
92 93
93 uart_A: serial@c81084c0 { 94 uart_A: serial@c11084c0 {
94 compatible = "amlogic,meson-uart"; 95 compatible = "amlogic,meson-uart";
95 reg = <0xc81084c0 0x14>; 96 reg = <0xc11084c0 0x18>;
96 interrupts = <0 90 1>; 97 interrupts = <0 26 1>;
97 clocks = <&clk81>; 98 clocks = <&clk81>;
98 status = "disabled"; 99 status = "disabled";
99 }; 100 };
100 101
101 uart_B: serial@c81084dc { 102 uart_B: serial@c11084dc {
102 compatible = "amlogic,meson-uart"; 103 compatible = "amlogic,meson-uart";
103 reg = <0xc81084dc 0x14>; 104 reg = <0xc11084dc 0x18>;
104 interrupts = <0 90 1>; 105 interrupts = <0 75 1>;
105 clocks = <&clk81>; 106 clocks = <&clk81>;
106 status = "disabled"; 107 status = "disabled";
107 }; 108 };
108 109
109 uart_C: serial@c8108700 { 110 uart_C: serial@c1108700 {
110 compatible = "amlogic,meson-uart"; 111 compatible = "amlogic,meson-uart";
111 reg = <0xc8108700 0x14>; 112 reg = <0xc1108700 0x18>;
112 interrupts = <0 90 1>; 113 interrupts = <0 93 1>;
113 clocks = <&clk81>; 114 clocks = <&clk81>;
114 status = "disabled"; 115 status = "disabled";
115 }; 116 };
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
index 16e8ce350dda..bb339d1648e0 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -13,7 +13,7 @@
13 13
14/ { 14/ {
15 model = "TI OMAP37XX EVM (TMDSEVM3730)"; 15 model = "TI OMAP37XX EVM (TMDSEVM3730)";
16 compatible = "ti,omap3-evm-37xx", "ti,omap36xx"; 16 compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3";
17 17
18 memory { 18 memory {
19 device_type = "memory"; 19 device_type = "memory";
diff --git a/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
index b5334ecff13c..fec076eb7aef 100644
--- a/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
@@ -90,7 +90,7 @@
90 regulators { 90 regulators {
91 vccio_sd: LDO_REG4 { 91 vccio_sd: LDO_REG4 {
92 regulator-name = "vccio_sd"; 92 regulator-name = "vccio_sd";
93 regulator-min-microvolt = <3300000>; 93 regulator-min-microvolt = <1800000>;
94 regulator-max-microvolt = <3300000>; 94 regulator-max-microvolt = <3300000>;
95 regulator-state-mem { 95 regulator-state-mem {
96 regulator-off-in-suspend; 96 regulator-off-in-suspend;
@@ -116,7 +116,12 @@
116 cap-sd-highspeed; 116 cap-sd-highspeed;
117 card-detect-delay = <200>; 117 card-detect-delay = <200>;
118 cd-gpios = <&gpio7 5 GPIO_ACTIVE_LOW>; 118 cd-gpios = <&gpio7 5 GPIO_ACTIVE_LOW>;
119 rockchip,default-sample-phase = <90>;
119 num-slots = <1>; 120 num-slots = <1>;
121 sd-uhs-sdr12;
122 sd-uhs-sdr25;
123 sd-uhs-sdr50;
124 sd-uhs-sdr104;
120 vmmc-supply = <&vcc33_sd>; 125 vmmc-supply = <&vcc33_sd>;
121 vqmmc-supply = <&vccio_sd>; 126 vqmmc-supply = <&vccio_sd>;
122}; 127};
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 275c78ccc0f3..860cea0a7613 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -149,7 +149,9 @@
149 broken-cd; 149 broken-cd;
150 bus-width = <8>; 150 bus-width = <8>;
151 cap-mmc-highspeed; 151 cap-mmc-highspeed;
152 rockchip,default-sample-phase = <158>;
152 disable-wp; 153 disable-wp;
154 mmc-hs200-1_8v;
153 mmc-pwrseq = <&emmc_pwrseq>; 155 mmc-pwrseq = <&emmc_pwrseq>;
154 non-removable; 156 non-removable;
155 num-slots = <1>; 157 num-slots = <1>;
@@ -355,6 +357,10 @@
355 num-slots = <1>; 357 num-slots = <1>;
356 pinctrl-names = "default"; 358 pinctrl-names = "default";
357 pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>; 359 pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>;
360 sd-uhs-sdr12;
361 sd-uhs-sdr25;
362 sd-uhs-sdr50;
363 sd-uhs-sdr104;
358 vmmc-supply = <&vcc33_sys>; 364 vmmc-supply = <&vcc33_sys>;
359 vqmmc-supply = <&vcc18_wl>; 365 vqmmc-supply = <&vcc18_wl>;
360}; 366};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 906e938fb6bf..4e7c6b7392af 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -222,8 +222,9 @@
222 sdmmc: dwmmc@ff0c0000 { 222 sdmmc: dwmmc@ff0c0000 {
223 compatible = "rockchip,rk3288-dw-mshc"; 223 compatible = "rockchip,rk3288-dw-mshc";
224 clock-freq-min-max = <400000 150000000>; 224 clock-freq-min-max = <400000 150000000>;
225 clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>; 225 clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
226 clock-names = "biu", "ciu"; 226 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
227 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
227 fifo-depth = <0x100>; 228 fifo-depth = <0x100>;
228 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; 229 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
229 reg = <0xff0c0000 0x4000>; 230 reg = <0xff0c0000 0x4000>;
@@ -233,8 +234,9 @@
233 sdio0: dwmmc@ff0d0000 { 234 sdio0: dwmmc@ff0d0000 {
234 compatible = "rockchip,rk3288-dw-mshc"; 235 compatible = "rockchip,rk3288-dw-mshc";
235 clock-freq-min-max = <400000 150000000>; 236 clock-freq-min-max = <400000 150000000>;
236 clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>; 237 clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
237 clock-names = "biu", "ciu"; 238 <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
239 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
238 fifo-depth = <0x100>; 240 fifo-depth = <0x100>;
239 interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; 241 interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
240 reg = <0xff0d0000 0x4000>; 242 reg = <0xff0d0000 0x4000>;
@@ -244,8 +246,9 @@
244 sdio1: dwmmc@ff0e0000 { 246 sdio1: dwmmc@ff0e0000 {
245 compatible = "rockchip,rk3288-dw-mshc"; 247 compatible = "rockchip,rk3288-dw-mshc";
246 clock-freq-min-max = <400000 150000000>; 248 clock-freq-min-max = <400000 150000000>;
247 clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>; 249 clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>,
248 clock-names = "biu", "ciu"; 250 <&cru SCLK_SDIO1_DRV>, <&cru SCLK_SDIO1_SAMPLE>;
251 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
249 fifo-depth = <0x100>; 252 fifo-depth = <0x100>;
250 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; 253 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
251 reg = <0xff0e0000 0x4000>; 254 reg = <0xff0e0000 0x4000>;
@@ -255,8 +258,9 @@
255 emmc: dwmmc@ff0f0000 { 258 emmc: dwmmc@ff0f0000 {
256 compatible = "rockchip,rk3288-dw-mshc"; 259 compatible = "rockchip,rk3288-dw-mshc";
257 clock-freq-min-max = <400000 150000000>; 260 clock-freq-min-max = <400000 150000000>;
258 clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>; 261 clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
259 clock-names = "biu", "ciu"; 262 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
263 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
260 fifo-depth = <0x100>; 264 fifo-depth = <0x100>;
261 interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; 265 interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
262 reg = <0xff0f0000 0x4000>; 266 reg = <0xff0f0000 0x4000>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 034cd48ae28b..cc05cde0f9a4 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -921,6 +921,20 @@
921 clocks = <&twi1_clk>; 921 clocks = <&twi1_clk>;
922 status = "disabled"; 922 status = "disabled";
923 }; 923 };
924
925 pioA: pinctrl@fc038000 {
926 compatible = "atmel,sama5d2-pinctrl";
927 reg = <0xfc038000 0x600>;
928 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 7>,
929 <68 IRQ_TYPE_LEVEL_HIGH 7>,
930 <69 IRQ_TYPE_LEVEL_HIGH 7>,
931 <70 IRQ_TYPE_LEVEL_HIGH 7>;
932 interrupt-controller;
933 #interrupt-cells = <2>;
934 gpio-controller;
935 #gpio-cells = <2>;
936 clocks = <&pioA_clk>;
937 };
924 }; 938 };
925 }; 939 };
926}; 940};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index 810cda743b6d..9c2387b34d0c 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -56,7 +56,7 @@
56 /* VMMCI level-shifter enable */ 56 /* VMMCI level-shifter enable */
57 default_hrefv60_cfg2 { 57 default_hrefv60_cfg2 {
58 pins = "GPIO169_D22"; 58 pins = "GPIO169_D22";
59 ste,config = <&gpio_out_lo>; 59 ste,config = <&gpio_out_hi>;
60 }; 60 };
61 /* VMMCI level-shifter voltage select */ 61 /* VMMCI level-shifter voltage select */
62 default_hrefv60_cfg3 { 62 default_hrefv60_cfg3 {
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 32a5ccb14e7e..e80e42163883 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -47,35 +47,35 @@
47 47
48 button@1 { 48 button@1 {
49 debounce_interval = <50>; 49 debounce_interval = <50>;
50 wakeup = <1>; 50 wakeup-source;
51 linux,code = <2>; 51 linux,code = <2>;
52 label = "userpb"; 52 label = "userpb";
53 gpios = <&gpio1 0 0x4>; 53 gpios = <&gpio1 0 0x4>;
54 }; 54 };
55 button@2 { 55 button@2 {
56 debounce_interval = <50>; 56 debounce_interval = <50>;
57 wakeup = <1>; 57 wakeup-source;
58 linux,code = <3>; 58 linux,code = <3>;
59 label = "extkb1"; 59 label = "extkb1";
60 gpios = <&gpio4 23 0x4>; 60 gpios = <&gpio4 23 0x4>;
61 }; 61 };
62 button@3 { 62 button@3 {
63 debounce_interval = <50>; 63 debounce_interval = <50>;
64 wakeup = <1>; 64 wakeup-source;
65 linux,code = <4>; 65 linux,code = <4>;
66 label = "extkb2"; 66 label = "extkb2";
67 gpios = <&gpio4 24 0x4>; 67 gpios = <&gpio4 24 0x4>;
68 }; 68 };
69 button@4 { 69 button@4 {
70 debounce_interval = <50>; 70 debounce_interval = <50>;
71 wakeup = <1>; 71 wakeup-source;
72 linux,code = <5>; 72 linux,code = <5>;
73 label = "extkb3"; 73 label = "extkb3";
74 gpios = <&gpio5 1 0x4>; 74 gpios = <&gpio5 1 0x4>;
75 }; 75 };
76 button@5 { 76 button@5 {
77 debounce_interval = <50>; 77 debounce_interval = <50>;
78 wakeup = <1>; 78 wakeup-source;
79 linux,code = <6>; 79 linux,code = <6>;
80 label = "extkb4"; 80 label = "extkb4";
81 gpios = <&gpio5 2 0x4>; 81 gpios = <&gpio5 2 0x4>;
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 9d4f86e9c50a..d845bd1448b5 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -234,7 +234,9 @@
234 gpio-controller; 234 gpio-controller;
235 #interrupt-cells = <2>; 235 #interrupt-cells = <2>;
236 interrupt-controller; 236 interrupt-controller;
237 /*
237 gpio-ranges = <&pinmux 0 0 246>; 238 gpio-ranges = <&pinmux 0 0 246>;
239 */
238 }; 240 };
239 241
240 apbmisc@70000800 { 242 apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 1e204a6de12c..819e2ae2cabe 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -258,7 +258,9 @@
258 gpio-controller; 258 gpio-controller;
259 #interrupt-cells = <2>; 259 #interrupt-cells = <2>;
260 interrupt-controller; 260 interrupt-controller;
261 /*
261 gpio-ranges = <&pinmux 0 0 251>; 262 gpio-ranges = <&pinmux 0 0 251>;
263 */
262 }; 264 };
263 265
264 apbdma: dma@0,60020000 { 266 apbdma: dma@0,60020000 {
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index e058709e6d98..969b828505ae 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -244,7 +244,9 @@
244 gpio-controller; 244 gpio-controller;
245 #interrupt-cells = <2>; 245 #interrupt-cells = <2>;
246 interrupt-controller; 246 interrupt-controller;
247 /*
247 gpio-ranges = <&pinmux 0 0 224>; 248 gpio-ranges = <&pinmux 0 0 224>;
249 */
248 }; 250 };
249 251
250 apbmisc@70000800 { 252 apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index fe04fb5e155f..c6938ad1b543 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -349,7 +349,9 @@
349 gpio-controller; 349 gpio-controller;
350 #interrupt-cells = <2>; 350 #interrupt-cells = <2>;
351 interrupt-controller; 351 interrupt-controller;
352 /*
352 gpio-ranges = <&pinmux 0 0 248>; 353 gpio-ranges = <&pinmux 0 0 248>;
354 */
353 }; 355 };
354 356
355 apbmisc@70000800 { 357 apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
index 33963acd7e8f..f80f772d99fb 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
@@ -85,7 +85,7 @@
85}; 85};
86 86
87&ethsc { 87&ethsc {
88 interrupts = <0 50 4>; 88 interrupts = <0 52 4>;
89}; 89};
90 90
91&serial0 { 91&serial0 {
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 1ff2bfa2e183..13ba48c4b03b 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -166,7 +166,6 @@ CONFIG_MMC_SDHCI=y
166CONFIG_MMC_SDHCI_S3C=y 166CONFIG_MMC_SDHCI_S3C=y
167CONFIG_MMC_SDHCI_S3C_DMA=y 167CONFIG_MMC_SDHCI_S3C_DMA=y
168CONFIG_MMC_DW=y 168CONFIG_MMC_DW=y
169CONFIG_MMC_DW_IDMAC=y
170CONFIG_MMC_DW_EXYNOS=y 169CONFIG_MMC_DW_EXYNOS=y
171CONFIG_RTC_CLASS=y 170CONFIG_RTC_CLASS=y
172CONFIG_RTC_DRV_MAX77686=y 171CONFIG_RTC_DRV_MAX77686=y
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index 5997dbc69822..b2e340b272ee 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -69,7 +69,6 @@ CONFIG_NOP_USB_XCEIV=y
69CONFIG_MMC=y 69CONFIG_MMC=y
70CONFIG_RTC_CLASS=y 70CONFIG_RTC_CLASS=y
71CONFIG_MMC_DW=y 71CONFIG_MMC_DW=y
72CONFIG_MMC_DW_IDMAC=y
73CONFIG_MMC_DW_PLTFM=y 72CONFIG_MMC_DW_PLTFM=y
74CONFIG_RTC_DRV_PL031=y 73CONFIG_RTC_DRV_PL031=y
75CONFIG_DMADEVICES=y 74CONFIG_DMADEVICES=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index 1c47f86c3970..b7e8cdab51f9 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -119,7 +119,6 @@ CONFIG_USB_EHCI_HCD=y
119CONFIG_USB_EHCI_ROOT_HUB_TT=y 119CONFIG_USB_EHCI_ROOT_HUB_TT=y
120CONFIG_MMC=y 120CONFIG_MMC=y
121CONFIG_MMC_DW=y 121CONFIG_MMC_DW=y
122CONFIG_MMC_DW_IDMAC=y
123CONFIG_NEW_LEDS=y 122CONFIG_NEW_LEDS=y
124CONFIG_LEDS_CLASS=y 123CONFIG_LEDS_CLASS=y
125CONFIG_LEDS_PCA9532=y 124CONFIG_LEDS_PCA9532=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
new file mode 100644
index 000000000000..6607d976e07d
--- /dev/null
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -0,0 +1,188 @@
1/*
2 * arch/arm/include/asm/arch_gicv3.h
3 *
4 * Copyright (C) 2015 ARM Ltd.
5 *
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_ARCH_GICV3_H
19#define __ASM_ARCH_GICV3_H
20
21#ifndef __ASSEMBLY__
22
23#include <linux/io.h>
24
25#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
26#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
27
28#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
29#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1)
30#define ICC_IAR1 __ACCESS_CP15(c12, 0, c12, 0)
31#define ICC_SGI1R __ACCESS_CP15_64(0, c12)
32#define ICC_PMR __ACCESS_CP15(c4, 0, c6, 0)
33#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4)
34#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5)
35#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7)
36
37#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
38
39#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
40#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
41#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
42#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
43#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
44#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5)
45#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
46
47#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
48#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)
49
50#define ICH_LR0 __LR0(0)
51#define ICH_LR1 __LR0(1)
52#define ICH_LR2 __LR0(2)
53#define ICH_LR3 __LR0(3)
54#define ICH_LR4 __LR0(4)
55#define ICH_LR5 __LR0(5)
56#define ICH_LR6 __LR0(6)
57#define ICH_LR7 __LR0(7)
58#define ICH_LR8 __LR8(0)
59#define ICH_LR9 __LR8(1)
60#define ICH_LR10 __LR8(2)
61#define ICH_LR11 __LR8(3)
62#define ICH_LR12 __LR8(4)
63#define ICH_LR13 __LR8(5)
64#define ICH_LR14 __LR8(6)
65#define ICH_LR15 __LR8(7)
66
67/* LR top half */
68#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
69#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)
70
71#define ICH_LRC0 __LRC0(0)
72#define ICH_LRC1 __LRC0(1)
73#define ICH_LRC2 __LRC0(2)
74#define ICH_LRC3 __LRC0(3)
75#define ICH_LRC4 __LRC0(4)
76#define ICH_LRC5 __LRC0(5)
77#define ICH_LRC6 __LRC0(6)
78#define ICH_LRC7 __LRC0(7)
79#define ICH_LRC8 __LRC8(0)
80#define ICH_LRC9 __LRC8(1)
81#define ICH_LRC10 __LRC8(2)
82#define ICH_LRC11 __LRC8(3)
83#define ICH_LRC12 __LRC8(4)
84#define ICH_LRC13 __LRC8(5)
85#define ICH_LRC14 __LRC8(6)
86#define ICH_LRC15 __LRC8(7)
87
88#define __AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
89#define ICH_AP0R0 __AP0Rx(0)
90#define ICH_AP0R1 __AP0Rx(1)
91#define ICH_AP0R2 __AP0Rx(2)
92#define ICH_AP0R3 __AP0Rx(3)
93
94#define __AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
95#define ICH_AP1R0 __AP1Rx(0)
96#define ICH_AP1R1 __AP1Rx(1)
97#define ICH_AP1R2 __AP1Rx(2)
98#define ICH_AP1R3 __AP1Rx(3)
99
100/* Low-level accessors */
101
102static inline void gic_write_eoir(u32 irq)
103{
104 asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
105 isb();
106}
107
108static inline void gic_write_dir(u32 val)
109{
110 asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
111 isb();
112}
113
114static inline u32 gic_read_iar(void)
115{
116 u32 irqstat;
117
118 asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
119 return irqstat;
120}
121
122static inline void gic_write_pmr(u32 val)
123{
124 asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
125}
126
127static inline void gic_write_ctlr(u32 val)
128{
129 asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
130 isb();
131}
132
133static inline void gic_write_grpen1(u32 val)
134{
135 asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
136 isb();
137}
138
139static inline void gic_write_sgi1r(u64 val)
140{
141 asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
142}
143
144static inline u32 gic_read_sre(void)
145{
146 u32 val;
147
148 asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
149 return val;
150}
151
152static inline void gic_write_sre(u32 val)
153{
154 asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
155 isb();
156}
157
158/*
159 * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
160 * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
161 * make much sense.
162 * Moreover, 64bit I/O emulation is extremely difficult to implement on
163 * AArch32, since the syndrome register doesn't provide any information for
164 * them.
165 * Consequently, the following IO helpers use 32bit accesses.
166 *
167 * There are only two registers that need 64bit accesses in this driver:
168 * - GICD_IROUTERn, contain the affinity values associated to each interrupt.
169 * The upper-word (aff3) will always be 0, so there is no need for a lock.
170 * - GICR_TYPER is an ID register and doesn't need atomicity.
171 */
172static inline void gic_write_irouter(u64 val, volatile void __iomem *addr)
173{
174 writel_relaxed((u32)val, addr);
175 writel_relaxed((u32)(val >> 32), addr + 4);
176}
177
178static inline u64 gic_read_typer(const volatile void __iomem *addr)
179{
180 u64 val;
181
182 val = readl_relaxed(addr);
183 val |= (u64)readl_relaxed(addr + 4) << 32;
184 return val;
185}
186
187#endif /* !__ASSEMBLY__ */
188#endif /* !__ASM_ARCH_GICV3_H */
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index fe3ef397f5a4..9e10c4567eb4 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -27,8 +27,8 @@
27 * strex/ldrex monitor on some implementations. The reason we can use it for 27 * strex/ldrex monitor on some implementations. The reason we can use it for
28 * atomic_set() is the clrex or dummy strex done on every exception return. 28 * atomic_set() is the clrex or dummy strex done on every exception return.
29 */ 29 */
30#define atomic_read(v) ACCESS_ONCE((v)->counter) 30#define atomic_read(v) READ_ONCE((v)->counter)
31#define atomic_set(v,i) (((v)->counter) = (i)) 31#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
32 32
33#if __LINUX_ARM_ARCH__ >= 6 33#if __LINUX_ARM_ARCH__ >= 6
34 34
@@ -210,8 +210,8 @@ ATOMIC_OP(xor, ^=, eor)
210 210
211#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) 211#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
212#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) 212#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
213#define atomic_inc_return(v) (atomic_add_return(1, v)) 213#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
214#define atomic_dec_return(v) (atomic_sub_return(1, v)) 214#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
215#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) 215#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
216 216
217#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) 217#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
@@ -442,11 +442,11 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
442 442
443#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 443#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
444#define atomic64_inc(v) atomic64_add(1LL, (v)) 444#define atomic64_inc(v) atomic64_add(1LL, (v))
445#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) 445#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
446#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) 446#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
447#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) 447#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
448#define atomic64_dec(v) atomic64_sub(1LL, (v)) 448#define atomic64_dec(v) atomic64_sub(1LL, (v))
449#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) 449#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
450#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 450#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
451#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) 451#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
452 452
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 916a2744d5c6..97882f9bad12 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
39 39
40 switch (size) { 40 switch (size) {
41#if __LINUX_ARM_ARCH__ >= 6 41#if __LINUX_ARM_ARCH__ >= 6
42#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
42 case 1: 43 case 1:
43 asm volatile("@ __xchg1\n" 44 asm volatile("@ __xchg1\n"
44 "1: ldrexb %0, [%3]\n" 45 "1: ldrexb %0, [%3]\n"
@@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
49 : "r" (x), "r" (ptr) 50 : "r" (x), "r" (ptr)
50 : "memory", "cc"); 51 : "memory", "cc");
51 break; 52 break;
53 case 2:
54 asm volatile("@ __xchg2\n"
55 "1: ldrexh %0, [%3]\n"
56 " strexh %1, %2, [%3]\n"
57 " teq %1, #0\n"
58 " bne 1b"
59 : "=&r" (ret), "=&r" (tmp)
60 : "r" (x), "r" (ptr)
61 : "memory", "cc");
62 break;
63#endif
52 case 4: 64 case 4:
53 asm volatile("@ __xchg4\n" 65 asm volatile("@ __xchg4\n"
54 "1: ldrex %0, [%3]\n" 66 "1: ldrex %0, [%3]\n"
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 43908146a5cf..e6b70d9d084e 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void)
54 54
55#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") 55#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
56#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") 56#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
57
58#ifndef CONFIG_CPU_V7M
59#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc")
60#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc")
61#else
62#define local_abt_enable() do { } while (0)
63#define local_abt_disable() do { } while (0)
64#endif
57#else 65#else
58 66
59/* 67/*
@@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void)
136 : "memory", "cc"); \ 144 : "memory", "cc"); \
137 }) 145 })
138 146
147#define local_abt_enable() do { } while (0)
148#define local_abt_disable() do { } while (0)
139#endif 149#endif
140 150
141/* 151/*
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index cb3a40717edd..5c1ad11aa392 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -47,7 +47,7 @@ struct machine_desc {
47 unsigned l2c_aux_val; /* L2 cache aux value */ 47 unsigned l2c_aux_val; /* L2 cache aux value */
48 unsigned l2c_aux_mask; /* L2 cache aux mask */ 48 unsigned l2c_aux_mask; /* L2 cache aux mask */
49 void (*l2c_write_sec)(unsigned long, unsigned); 49 void (*l2c_write_sec)(unsigned long, unsigned);
50 struct smp_operations *smp; /* SMP operations */ 50 const struct smp_operations *smp; /* SMP operations */
51 bool (*smp_init)(void); 51 bool (*smp_init)(void);
52 void (*fixup)(struct tag *, char **); 52 void (*fixup)(struct tag *, char **);
53 void (*dt_fixup)(void); 53 void (*dt_fixup)(void);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 98d58bb04ac5..c79b57bf71c4 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,10 +76,12 @@
76 */ 76 */
77#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) 77#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
78 78
79#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
79/* 80/*
80 * Allow 16MB-aligned ioremap pages 81 * Allow 16MB-aligned ioremap pages
81 */ 82 */
82#define IOREMAP_MAX_ORDER 24 83#define IOREMAP_MAX_ORDER 24
84#endif
83 85
84#else /* CONFIG_MMU */ 86#else /* CONFIG_MMU */
85 87
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f40354198bad..348caabb7625 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -43,7 +43,7 @@
43 */ 43 */
44#define VMALLOC_OFFSET (8*1024*1024) 44#define VMALLOC_OFFSET (8*1024*1024)
45#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) 45#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
46#define VMALLOC_END 0xff000000UL 46#define VMALLOC_END 0xff800000UL
47 47
48#define LIBRARY_TEXT_START 0x0c000000 48#define LIBRARY_TEXT_START 0x0c000000
49 49
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index ef356659b4f4..3d6dc8b460e4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -112,7 +112,7 @@ struct smp_operations {
112 112
113struct of_cpu_method { 113struct of_cpu_method {
114 const char *method; 114 const char *method;
115 struct smp_operations *ops; 115 const struct smp_operations *ops;
116}; 116};
117 117
118#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \ 118#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
@@ -122,6 +122,6 @@ struct of_cpu_method {
122/* 122/*
123 * set platform specific SMP operations 123 * set platform specific SMP operations
124 */ 124 */
125extern void smp_set_ops(struct smp_operations *); 125extern void smp_set_ops(const struct smp_operations *);
126 126
127#endif /* ifndef __ASM_ARM_SMP_H */ 127#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7cba573c2cc9..7b84657fba35 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -21,13 +21,6 @@
21 */ 21 */
22#define __NR_syscalls (392) 22#define __NR_syscalls (392)
23 23
24/*
25 * *NOTE*: This is a ghost syscall private to the kernel. Only the
26 * __kuser_cmpxchg code in entry-armv.S should be aware of its
27 * existence. Don't ever use this from user code.
28 */
29#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
30
31#define __ARCH_WANT_STAT64 24#define __ARCH_WANT_STAT64
32#define __ARCH_WANT_SYS_GETHOSTNAME 25#define __ARCH_WANT_SYS_GETHOSTNAME
33#define __ARCH_WANT_SYS_PAUSE 26#define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 11c54de9f8cf..65addcbf5b30 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -101,6 +101,7 @@ void __init arm_dt_init_cpu_maps(void)
101 if (of_property_read_u32(cpu, "reg", &hwid)) { 101 if (of_property_read_u32(cpu, "reg", &hwid)) {
102 pr_debug(" * %s missing reg property\n", 102 pr_debug(" * %s missing reg property\n",
103 cpu->full_name); 103 cpu->full_name);
104 of_node_put(cpu);
104 return; 105 return;
105 } 106 }
106 107
@@ -108,8 +109,10 @@ void __init arm_dt_init_cpu_maps(void)
108 * 8 MSBs must be set to 0 in the DT since the reg property 109 * 8 MSBs must be set to 0 in the DT since the reg property
109 * defines the MPIDR[23:0]. 110 * defines the MPIDR[23:0].
110 */ 111 */
111 if (hwid & ~MPIDR_HWID_BITMASK) 112 if (hwid & ~MPIDR_HWID_BITMASK) {
113 of_node_put(cpu);
112 return; 114 return;
115 }
113 116
114 /* 117 /*
115 * Duplicate MPIDRs are a recipe for disaster. 118 * Duplicate MPIDRs are a recipe for disaster.
@@ -119,9 +122,11 @@ void __init arm_dt_init_cpu_maps(void)
119 * to avoid matching valid MPIDR[23:0] values. 122 * to avoid matching valid MPIDR[23:0] values.
120 */ 123 */
121 for (j = 0; j < cpuidx; j++) 124 for (j = 0; j < cpuidx; j++)
122 if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg " 125 if (WARN(tmp_map[j] == hwid,
123 "properties in the DT\n")) 126 "Duplicate /cpu reg properties in the DT\n")) {
127 of_node_put(cpu);
124 return; 128 return;
129 }
125 130
126 /* 131 /*
127 * Build a stashed array of MPIDR values. Numbering scheme 132 * Build a stashed array of MPIDR values. Numbering scheme
@@ -143,6 +148,7 @@ void __init arm_dt_init_cpu_maps(void)
143 "max cores %u, capping them\n", 148 "max cores %u, capping them\n",
144 cpuidx, nr_cpu_ids)) { 149 cpuidx, nr_cpu_ids)) {
145 cpuidx = nr_cpu_ids; 150 cpuidx = nr_cpu_ids;
151 of_node_put(cpu);
146 break; 152 break;
147 } 153 }
148 154
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3e1c26eb32b4..3ce377f7251f 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -427,8 +427,7 @@ ENDPROC(__fiq_abt)
427 .endm 427 .endm
428 428
429 .macro kuser_cmpxchg_check 429 .macro kuser_cmpxchg_check
430#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ 430#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
431 !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
432#ifndef CONFIG_MMU 431#ifndef CONFIG_MMU
433#warning "NPTL on non MMU needs fixing" 432#warning "NPTL on non MMU needs fixing"
434#else 433#else
@@ -859,20 +858,7 @@ __kuser_helper_start:
859 858
860__kuser_cmpxchg64: @ 0xffff0f60 859__kuser_cmpxchg64: @ 0xffff0f60
861 860
862#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 861#if defined(CONFIG_CPU_32v6K)
863
864 /*
865 * Poor you. No fast solution possible...
866 * The kernel itself must perform the operation.
867 * A special ghost syscall is used for that (see traps.c).
868 */
869 stmfd sp!, {r7, lr}
870 ldr r7, 1f @ it's 20 bits
871 swi __ARM_NR_cmpxchg64
872 ldmfd sp!, {r7, pc}
8731: .word __ARM_NR_cmpxchg64
874
875#elif defined(CONFIG_CPU_32v6K)
876 862
877 stmfd sp!, {r4, r5, r6, r7} 863 stmfd sp!, {r4, r5, r6, r7}
878 ldrd r4, r5, [r0] @ load old val 864 ldrd r4, r5, [r0] @ load old val
@@ -948,20 +934,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
948 934
949__kuser_cmpxchg: @ 0xffff0fc0 935__kuser_cmpxchg: @ 0xffff0fc0
950 936
951#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 937#if __LINUX_ARM_ARCH__ < 6
952
953 /*
954 * Poor you. No fast solution possible...
955 * The kernel itself must perform the operation.
956 * A special ghost syscall is used for that (see traps.c).
957 */
958 stmfd sp!, {r7, lr}
959 ldr r7, 1f @ it's 20 bits
960 swi __ARM_NR_cmpxchg
961 ldmfd sp!, {r7, pc}
9621: .word __ARM_NR_cmpxchg
963
964#elif __LINUX_ARM_ARCH__ < 6
965 938
966#ifdef CONFIG_MMU 939#ifdef CONFIG_MMU
967 940
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index dc7d0a95bd36..6284779d64ee 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -35,7 +35,6 @@
35#include <asm/cputype.h> 35#include <asm/cputype.h>
36#include <asm/current.h> 36#include <asm/current.h>
37#include <asm/hw_breakpoint.h> 37#include <asm/hw_breakpoint.h>
38#include <asm/kdebug.h>
39#include <asm/traps.h> 38#include <asm/traps.h>
40 39
41/* Breakpoint currently in use for each BRP. */ 40/* Breakpoint currently in use for each BRP. */
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index fd9eefce0a7b..9232caee7060 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -74,7 +74,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
74void 74void
75sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) 75sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
76{ 76{
77 struct pt_regs *thread_regs; 77 struct thread_info *ti;
78 int regno; 78 int regno;
79 79
80 /* Just making sure... */ 80 /* Just making sure... */
@@ -86,24 +86,17 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
86 gdb_regs[regno] = 0; 86 gdb_regs[regno] = 0;
87 87
88 /* Otherwise, we have only some registers from switch_to() */ 88 /* Otherwise, we have only some registers from switch_to() */
89 thread_regs = task_pt_regs(task); 89 ti = task_thread_info(task);
90 gdb_regs[_R0] = thread_regs->ARM_r0; 90 gdb_regs[_R4] = ti->cpu_context.r4;
91 gdb_regs[_R1] = thread_regs->ARM_r1; 91 gdb_regs[_R5] = ti->cpu_context.r5;
92 gdb_regs[_R2] = thread_regs->ARM_r2; 92 gdb_regs[_R6] = ti->cpu_context.r6;
93 gdb_regs[_R3] = thread_regs->ARM_r3; 93 gdb_regs[_R7] = ti->cpu_context.r7;
94 gdb_regs[_R4] = thread_regs->ARM_r4; 94 gdb_regs[_R8] = ti->cpu_context.r8;
95 gdb_regs[_R5] = thread_regs->ARM_r5; 95 gdb_regs[_R9] = ti->cpu_context.r9;
96 gdb_regs[_R6] = thread_regs->ARM_r6; 96 gdb_regs[_R10] = ti->cpu_context.sl;
97 gdb_regs[_R7] = thread_regs->ARM_r7; 97 gdb_regs[_FP] = ti->cpu_context.fp;
98 gdb_regs[_R8] = thread_regs->ARM_r8; 98 gdb_regs[_SPT] = ti->cpu_context.sp;
99 gdb_regs[_R9] = thread_regs->ARM_r9; 99 gdb_regs[_PC] = ti->cpu_context.pc;
100 gdb_regs[_R10] = thread_regs->ARM_r10;
101 gdb_regs[_FP] = thread_regs->ARM_fp;
102 gdb_regs[_IP] = thread_regs->ARM_ip;
103 gdb_regs[_SPT] = thread_regs->ARM_sp;
104 gdb_regs[_LR] = thread_regs->ARM_lr;
105 gdb_regs[_PC] = thread_regs->ARM_pc;
106 gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
107} 100}
108 101
109void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) 102void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 48185a773852..b26361355dae 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -80,7 +80,7 @@ static DECLARE_COMPLETION(cpu_running);
80 80
81static struct smp_operations smp_ops; 81static struct smp_operations smp_ops;
82 82
83void __init smp_set_ops(struct smp_operations *ops) 83void __init smp_set_ops(const struct smp_operations *ops)
84{ 84{
85 if (ops) 85 if (ops)
86 smp_ops = *ops; 86 smp_ops = *ops;
@@ -400,6 +400,7 @@ asmlinkage void secondary_start_kernel(void)
400 400
401 local_irq_enable(); 401 local_irq_enable();
402 local_fiq_enable(); 402 local_fiq_enable();
403 local_abt_enable();
403 404
404 /* 405 /*
405 * OK, it's off to the idle thread for us 406 * OK, it's off to the idle thread for us
@@ -748,6 +749,15 @@ core_initcall(register_cpufreq_notifier);
748 749
749static void raise_nmi(cpumask_t *mask) 750static void raise_nmi(cpumask_t *mask)
750{ 751{
752 /*
753 * Generate the backtrace directly if we are running in a calling
754 * context that is not preemptible by the backtrace IPI. Note
755 * that nmi_cpu_backtrace() automatically removes the current cpu
756 * from mask.
757 */
758 if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
759 nmi_cpu_backtrace(NULL);
760
751 smp_cross_call(mask, IPI_CPU_BACKTRACE); 761 smp_cross_call(mask, IPI_CPU_BACKTRACE);
752} 762}
753 763
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index e9035cda1485..1bfa7a7f5533 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -23,7 +23,6 @@
23#include <linux/of_irq.h> 23#include <linux/of_irq.h>
24#include <linux/of_address.h> 24#include <linux/of_address.h>
25 25
26#include <asm/smp_plat.h>
27#include <asm/smp_twd.h> 26#include <asm/smp_twd.h>
28 27
29/* set up by the platform code */ 28/* set up by the platform code */
@@ -34,6 +33,8 @@ static unsigned long twd_timer_rate;
34static DEFINE_PER_CPU(bool, percpu_setup_called); 33static DEFINE_PER_CPU(bool, percpu_setup_called);
35 34
36static struct clock_event_device __percpu *twd_evt; 35static struct clock_event_device __percpu *twd_evt;
36static unsigned int twd_features =
37 CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
37static int twd_ppi; 38static int twd_ppi;
38 39
39static int twd_shutdown(struct clock_event_device *clk) 40static int twd_shutdown(struct clock_event_device *clk)
@@ -294,8 +295,7 @@ static void twd_timer_setup(void)
294 writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); 295 writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
295 296
296 clk->name = "local_timer"; 297 clk->name = "local_timer";
297 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 298 clk->features = twd_features;
298 CLOCK_EVT_FEAT_C3STOP;
299 clk->rating = 350; 299 clk->rating = 350;
300 clk->set_state_shutdown = twd_shutdown; 300 clk->set_state_shutdown = twd_shutdown;
301 clk->set_state_periodic = twd_set_periodic; 301 clk->set_state_periodic = twd_set_periodic;
@@ -350,6 +350,8 @@ static int __init twd_local_timer_common_register(struct device_node *np)
350 goto out_irq; 350 goto out_irq;
351 351
352 twd_get_clock(np); 352 twd_get_clock(np);
353 if (!of_property_read_bool(np, "always-on"))
354 twd_features |= CLOCK_EVT_FEAT_C3STOP;
353 355
354 /* 356 /*
355 * Immediately configure the timer on the boot CPU, unless we need 357 * Immediately configure the timer on the boot CPU, unless we need
@@ -392,9 +394,6 @@ static void __init twd_local_timer_of_register(struct device_node *np)
392{ 394{
393 int err; 395 int err;
394 396
395 if (!is_smp() || !setup_max_cpus)
396 return;
397
398 twd_ppi = irq_of_parse_and_map(np, 0); 397 twd_ppi = irq_of_parse_and_map(np, 0);
399 if (!twd_ppi) { 398 if (!twd_ppi) {
400 err = -EINVAL; 399 err = -EINVAL;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 969f9d9e665f..bc698383e822 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
625 set_tls(regs->ARM_r0); 625 set_tls(regs->ARM_r0);
626 return 0; 626 return 0;
627 627
628#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
629 /*
630 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
631 * Return zero in r0 if *MEM was changed or non-zero if no exchange
632 * happened. Also set the user C flag accordingly.
633 * If access permissions have to be fixed up then non-zero is
634 * returned and the operation has to be re-attempted.
635 *
636 * *NOTE*: This is a ghost syscall private to the kernel. Only the
637 * __kuser_cmpxchg code in entry-armv.S should be aware of its
638 * existence. Don't ever use this from user code.
639 */
640 case NR(cmpxchg):
641 for (;;) {
642 extern void do_DataAbort(unsigned long addr, unsigned int fsr,
643 struct pt_regs *regs);
644 unsigned long val;
645 unsigned long addr = regs->ARM_r2;
646 struct mm_struct *mm = current->mm;
647 pgd_t *pgd; pmd_t *pmd; pte_t *pte;
648 spinlock_t *ptl;
649
650 regs->ARM_cpsr &= ~PSR_C_BIT;
651 down_read(&mm->mmap_sem);
652 pgd = pgd_offset(mm, addr);
653 if (!pgd_present(*pgd))
654 goto bad_access;
655 pmd = pmd_offset(pgd, addr);
656 if (!pmd_present(*pmd))
657 goto bad_access;
658 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
659 if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
660 pte_unmap_unlock(pte, ptl);
661 goto bad_access;
662 }
663 val = *(unsigned long *)addr;
664 val -= regs->ARM_r0;
665 if (val == 0) {
666 *(unsigned long *)addr = regs->ARM_r1;
667 regs->ARM_cpsr |= PSR_C_BIT;
668 }
669 pte_unmap_unlock(pte, ptl);
670 up_read(&mm->mmap_sem);
671 return val;
672
673 bad_access:
674 up_read(&mm->mmap_sem);
675 /* simulate a write access fault */
676 do_DataAbort(addr, 15 + (1 << 11), regs);
677 }
678#endif
679
680 default: 628 default:
681 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS 629 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
682 if not implemented, rather than raising SIGILL. This 630 if not implemented, rather than raising SIGILL. This
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 210eccadb69a..356970f3b25e 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
21 depends on MMU && OF 21 depends on MMU && OF
22 select PREEMPT_NOTIFIERS 22 select PREEMPT_NOTIFIERS
23 select ANON_INODES 23 select ANON_INODES
24 select ARM_GIC
24 select HAVE_KVM_CPU_RELAX_INTERCEPT 25 select HAVE_KVM_CPU_RELAX_INTERCEPT
25 select HAVE_KVM_ARCH_TLB_FLUSH_ALL 26 select HAVE_KVM_ARCH_TLB_FLUSH_ALL
26 select KVM_MMIO 27 select KVM_MMIO
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index dc017adfddc8..78b286994577 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1080,7 +1080,7 @@ static int init_hyp_mode(void)
1080 */ 1080 */
1081 err = kvm_timer_hyp_init(); 1081 err = kvm_timer_hyp_init();
1082 if (err) 1082 if (err)
1083 goto out_free_mappings; 1083 goto out_free_context;
1084 1084
1085#ifndef CONFIG_HOTPLUG_CPU 1085#ifndef CONFIG_HOTPLUG_CPU
1086 free_boot_hyp_pgd(); 1086 free_boot_hyp_pgd();
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 970d6c043774..e936352ccb00 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/unwind.h>
12 13
13 .text 14 .text
14 15
@@ -20,6 +21,8 @@
20 */ 21 */
21ENTRY(__clear_user_std) 22ENTRY(__clear_user_std)
22WEAK(arm_clear_user) 23WEAK(arm_clear_user)
24UNWIND(.fnstart)
25UNWIND(.save {r1, lr})
23 stmfd sp!, {r1, lr} 26 stmfd sp!, {r1, lr}
24 mov r2, #0 27 mov r2, #0
25 cmp r1, #4 28 cmp r1, #4
@@ -44,6 +47,7 @@ WEAK(arm_clear_user)
44USER( strnebt r2, [r0]) 47USER( strnebt r2, [r0])
45 mov r0, #0 48 mov r0, #0
46 ldmfd sp!, {r1, pc} 49 ldmfd sp!, {r1, pc}
50UNWIND(.fnend)
47ENDPROC(arm_clear_user) 51ENDPROC(arm_clear_user)
48ENDPROC(__clear_user_std) 52ENDPROC(__clear_user_std)
49 53
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 4a87e86dec45..7c21760f590f 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -200,15 +200,15 @@ no_clk:
200 args.args_count = 0; 200 args.args_count = 0;
201 child_domain = of_genpd_get_from_provider(&args); 201 child_domain = of_genpd_get_from_provider(&args);
202 if (IS_ERR(child_domain)) 202 if (IS_ERR(child_domain))
203 goto next_pd; 203 continue;
204 204
205 if (of_parse_phandle_with_args(np, "power-domains", 205 if (of_parse_phandle_with_args(np, "power-domains",
206 "#power-domain-cells", 0, &args) != 0) 206 "#power-domain-cells", 0, &args) != 0)
207 goto next_pd; 207 continue;
208 208
209 parent_domain = of_genpd_get_from_provider(&args); 209 parent_domain = of_genpd_get_from_provider(&args);
210 if (IS_ERR(parent_domain)) 210 if (IS_ERR(parent_domain))
211 goto next_pd; 211 continue;
212 212
213 if (pm_genpd_add_subdomain(parent_domain, child_domain)) 213 if (pm_genpd_add_subdomain(parent_domain, child_domain))
214 pr_warn("%s failed to add subdomain: %s\n", 214 pr_warn("%s failed to add subdomain: %s\n",
@@ -216,8 +216,6 @@ no_clk:
216 else 216 else
217 pr_info("%s has as child subdomain: %s.\n", 217 pr_info("%s has as child subdomain: %s.\n",
218 parent_domain->name, child_domain->name); 218 parent_domain->name, child_domain->name);
219next_pd:
220 of_node_put(np);
221 } 219 }
222 220
223 return 0; 221 return 0;
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index e00eb39453a4..5a7e47ceec91 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -177,54 +177,57 @@ static struct irq_chip exynos_pmu_chip = {
177#endif 177#endif
178}; 178};
179 179
180static int exynos_pmu_domain_xlate(struct irq_domain *domain, 180static int exynos_pmu_domain_translate(struct irq_domain *d,
181 struct device_node *controller, 181 struct irq_fwspec *fwspec,
182 const u32 *intspec, 182 unsigned long *hwirq,
183 unsigned int intsize, 183 unsigned int *type)
184 unsigned long *out_hwirq,
185 unsigned int *out_type)
186{ 184{
187 if (domain->of_node != controller) 185 if (is_of_node(fwspec->fwnode)) {
188 return -EINVAL; /* Shouldn't happen, really... */ 186 if (fwspec->param_count != 3)
189 if (intsize != 3) 187 return -EINVAL;
190 return -EINVAL; /* Not GIC compliant */
191 if (intspec[0] != 0)
192 return -EINVAL; /* No PPI should point to this domain */
193 188
194 *out_hwirq = intspec[1]; 189 /* No PPI should point to this domain */
195 *out_type = intspec[2]; 190 if (fwspec->param[0] != 0)
196 return 0; 191 return -EINVAL;
192
193 *hwirq = fwspec->param[1];
194 *type = fwspec->param[2];
195 return 0;
196 }
197
198 return -EINVAL;
197} 199}
198 200
199static int exynos_pmu_domain_alloc(struct irq_domain *domain, 201static int exynos_pmu_domain_alloc(struct irq_domain *domain,
200 unsigned int virq, 202 unsigned int virq,
201 unsigned int nr_irqs, void *data) 203 unsigned int nr_irqs, void *data)
202{ 204{
203 struct of_phandle_args *args = data; 205 struct irq_fwspec *fwspec = data;
204 struct of_phandle_args parent_args; 206 struct irq_fwspec parent_fwspec;
205 irq_hw_number_t hwirq; 207 irq_hw_number_t hwirq;
206 int i; 208 int i;
207 209
208 if (args->args_count != 3) 210 if (fwspec->param_count != 3)
209 return -EINVAL; /* Not GIC compliant */ 211 return -EINVAL; /* Not GIC compliant */
210 if (args->args[0] != 0) 212 if (fwspec->param[0] != 0)
211 return -EINVAL; /* No PPI should point to this domain */ 213 return -EINVAL; /* No PPI should point to this domain */
212 214
213 hwirq = args->args[1]; 215 hwirq = fwspec->param[1];
214 216
215 for (i = 0; i < nr_irqs; i++) 217 for (i = 0; i < nr_irqs; i++)
216 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 218 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
217 &exynos_pmu_chip, NULL); 219 &exynos_pmu_chip, NULL);
218 220
219 parent_args = *args; 221 parent_fwspec = *fwspec;
220 parent_args.np = domain->parent->of_node; 222 parent_fwspec.fwnode = domain->parent->fwnode;
221 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); 223 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
224 &parent_fwspec);
222} 225}
223 226
224static const struct irq_domain_ops exynos_pmu_domain_ops = { 227static const struct irq_domain_ops exynos_pmu_domain_ops = {
225 .xlate = exynos_pmu_domain_xlate, 228 .translate = exynos_pmu_domain_translate,
226 .alloc = exynos_pmu_domain_alloc, 229 .alloc = exynos_pmu_domain_alloc,
227 .free = irq_domain_free_irqs_common, 230 .free = irq_domain_free_irqs_common,
228}; 231};
229 232
230static int __init exynos_pmu_irq_init(struct device_node *node, 233static int __init exynos_pmu_irq_init(struct device_node *node,
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 8c4467fad837..10bf7159b27d 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -181,40 +181,42 @@ static struct irq_chip imx_gpc_chip = {
181#endif 181#endif
182}; 182};
183 183
184static int imx_gpc_domain_xlate(struct irq_domain *domain, 184static int imx_gpc_domain_translate(struct irq_domain *d,
185 struct device_node *controller, 185 struct irq_fwspec *fwspec,
186 const u32 *intspec, 186 unsigned long *hwirq,
187 unsigned int intsize, 187 unsigned int *type)
188 unsigned long *out_hwirq,
189 unsigned int *out_type)
190{ 188{
191 if (domain->of_node != controller) 189 if (is_of_node(fwspec->fwnode)) {
192 return -EINVAL; /* Shouldn't happen, really... */ 190 if (fwspec->param_count != 3)
193 if (intsize != 3) 191 return -EINVAL;
194 return -EINVAL; /* Not GIC compliant */
195 if (intspec[0] != 0)
196 return -EINVAL; /* No PPI should point to this domain */
197 192
198 *out_hwirq = intspec[1]; 193 /* No PPI should point to this domain */
199 *out_type = intspec[2]; 194 if (fwspec->param[0] != 0)
200 return 0; 195 return -EINVAL;
196
197 *hwirq = fwspec->param[1];
198 *type = fwspec->param[2];
199 return 0;
200 }
201
202 return -EINVAL;
201} 203}
202 204
203static int imx_gpc_domain_alloc(struct irq_domain *domain, 205static int imx_gpc_domain_alloc(struct irq_domain *domain,
204 unsigned int irq, 206 unsigned int irq,
205 unsigned int nr_irqs, void *data) 207 unsigned int nr_irqs, void *data)
206{ 208{
207 struct of_phandle_args *args = data; 209 struct irq_fwspec *fwspec = data;
208 struct of_phandle_args parent_args; 210 struct irq_fwspec parent_fwspec;
209 irq_hw_number_t hwirq; 211 irq_hw_number_t hwirq;
210 int i; 212 int i;
211 213
212 if (args->args_count != 3) 214 if (fwspec->param_count != 3)
213 return -EINVAL; /* Not GIC compliant */ 215 return -EINVAL; /* Not GIC compliant */
214 if (args->args[0] != 0) 216 if (fwspec->param[0] != 0)
215 return -EINVAL; /* No PPI should point to this domain */ 217 return -EINVAL; /* No PPI should point to this domain */
216 218
217 hwirq = args->args[1]; 219 hwirq = fwspec->param[1];
218 if (hwirq >= GPC_MAX_IRQS) 220 if (hwirq >= GPC_MAX_IRQS)
219 return -EINVAL; /* Can't deal with this */ 221 return -EINVAL; /* Can't deal with this */
220 222
@@ -222,15 +224,16 @@ static int imx_gpc_domain_alloc(struct irq_domain *domain,
222 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, 224 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
223 &imx_gpc_chip, NULL); 225 &imx_gpc_chip, NULL);
224 226
225 parent_args = *args; 227 parent_fwspec = *fwspec;
226 parent_args.np = domain->parent->of_node; 228 parent_fwspec.fwnode = domain->parent->fwnode;
227 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args); 229 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
230 &parent_fwspec);
228} 231}
229 232
230static const struct irq_domain_ops imx_gpc_domain_ops = { 233static const struct irq_domain_ops imx_gpc_domain_ops = {
231 .xlate = imx_gpc_domain_xlate, 234 .translate = imx_gpc_domain_translate,
232 .alloc = imx_gpc_domain_alloc, 235 .alloc = imx_gpc_domain_alloc,
233 .free = irq_domain_free_irqs_common, 236 .free = irq_domain_free_irqs_common,
234}; 237};
235 238
236static int __init imx_gpc_init(struct device_node *node, 239static int __init imx_gpc_init(struct device_node *node,
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index c86a5a0aefac..e20fc4178b15 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -117,11 +117,4 @@ config MACH_KIRKWOOD
117 Say 'Y' here if you want your kernel to support boards based 117 Say 'Y' here if you want your kernel to support boards based
118 on the Marvell Kirkwood device tree. 118 on the Marvell Kirkwood device tree.
119 119
120config MACH_NETXBIG
121 bool "LaCie 2Big and 5Big Network v2"
122 depends on MACH_KIRKWOOD
123 help
124 Say 'Y' here if you want your kernel to support the
125 LaCie 2Big and 5Big Network v2
126
127endif 120endif
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index b4f01497ce0b..ecf9e0c3b107 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -13,4 +13,3 @@ endif
13 13
14obj-$(CONFIG_MACH_DOVE) += dove.o 14obj-$(CONFIG_MACH_DOVE) += dove.o
15obj-$(CONFIG_MACH_KIRKWOOD) += kirkwood.o kirkwood-pm.o 15obj-$(CONFIG_MACH_KIRKWOOD) += kirkwood.o kirkwood-pm.o
16obj-$(CONFIG_MACH_NETXBIG) += netxbig.o
diff --git a/arch/arm/mach-mvebu/board.h b/arch/arm/mach-mvebu/board.h
deleted file mode 100644
index 98e32cc2ef3d..000000000000
--- a/arch/arm/mach-mvebu/board.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Board functions for Marvell System On Chip
3 *
4 * Copyright (C) 2014
5 *
6 * Andrew Lunn <andrew@lunn.ch>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#ifndef __ARCH_MVEBU_BOARD_H
14#define __ARCH_MVEBU_BOARD_H
15
16#ifdef CONFIG_MACH_NETXBIG
17void netxbig_init(void);
18#else
19static inline void netxbig_init(void) {};
20#endif
21#endif
diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c
index 925f75f54268..f9d8e1ea7183 100644
--- a/arch/arm/mach-mvebu/kirkwood.c
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@ -25,7 +25,6 @@
25#include "kirkwood.h" 25#include "kirkwood.h"
26#include "kirkwood-pm.h" 26#include "kirkwood-pm.h"
27#include "common.h" 27#include "common.h"
28#include "board.h"
29 28
30static struct resource kirkwood_cpufreq_resources[] = { 29static struct resource kirkwood_cpufreq_resources[] = {
31 [0] = { 30 [0] = {
@@ -180,9 +179,6 @@ static void __init kirkwood_dt_init(void)
180 kirkwood_pm_init(); 179 kirkwood_pm_init();
181 kirkwood_dt_eth_fixup(); 180 kirkwood_dt_eth_fixup();
182 181
183 if (of_machine_is_compatible("lacie,netxbig"))
184 netxbig_init();
185
186 of_platform_populate(NULL, of_default_bus_match_table, auxdata, NULL); 182 of_platform_populate(NULL, of_default_bus_match_table, auxdata, NULL);
187} 183}
188 184
diff --git a/arch/arm/mach-mvebu/netxbig.c b/arch/arm/mach-mvebu/netxbig.c
deleted file mode 100644
index 94b11b6585a4..000000000000
--- a/arch/arm/mach-mvebu/netxbig.c
+++ /dev/null
@@ -1,191 +0,0 @@
1/*
2 * arch/arm/mach-mvbu/board-netxbig.c
3 *
4 * LaCie 2Big and 5Big Network v2 board setup
5 *
6 * Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/kernel.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/platform_data/leds-kirkwood-netxbig.h>
23#include "common.h"
24
25/*****************************************************************************
26 * GPIO extension LEDs
27 ****************************************************************************/
28
29/*
30 * The LEDs are controlled by a CPLD and can be configured through a GPIO
31 * extension bus:
32 *
33 * - address register : bit [0-2] -> GPIO [47-49]
34 * - data register : bit [0-2] -> GPIO [44-46]
35 * - enable register : GPIO 29
36 */
37
38static int netxbig_v2_gpio_ext_addr[] = { 47, 48, 49 };
39static int netxbig_v2_gpio_ext_data[] = { 44, 45, 46 };
40
41static struct netxbig_gpio_ext netxbig_v2_gpio_ext = {
42 .addr = netxbig_v2_gpio_ext_addr,
43 .num_addr = ARRAY_SIZE(netxbig_v2_gpio_ext_addr),
44 .data = netxbig_v2_gpio_ext_data,
45 .num_data = ARRAY_SIZE(netxbig_v2_gpio_ext_data),
46 .enable = 29,
47};
48
49/*
50 * Address register selection:
51 *
52 * addr | register
53 * ----------------------------
54 * 0 | front LED
55 * 1 | front LED brightness
56 * 2 | SATA LED brightness
57 * 3 | SATA0 LED
58 * 4 | SATA1 LED
59 * 5 | SATA2 LED
60 * 6 | SATA3 LED
61 * 7 | SATA4 LED
62 *
63 * Data register configuration:
64 *
65 * data | LED brightness
66 * -------------------------------------------------
67 * 0 | min (off)
68 * - | -
69 * 7 | max
70 *
71 * data | front LED mode
72 * -------------------------------------------------
73 * 0 | fix off
74 * 1 | fix blue on
75 * 2 | fix red on
76 * 3 | blink blue on=1 sec and blue off=1 sec
77 * 4 | blink red on=1 sec and red off=1 sec
78 * 5 | blink blue on=2.5 sec and red on=0.5 sec
79 * 6 | blink blue on=1 sec and red on=1 sec
80 * 7 | blink blue on=0.5 sec and blue off=2.5 sec
81 *
82 * data | SATA LED mode
83 * -------------------------------------------------
84 * 0 | fix off
85 * 1 | SATA activity blink
86 * 2 | fix red on
87 * 3 | blink blue on=1 sec and blue off=1 sec
88 * 4 | blink red on=1 sec and red off=1 sec
89 * 5 | blink blue on=2.5 sec and red on=0.5 sec
90 * 6 | blink blue on=1 sec and red on=1 sec
91 * 7 | fix blue on
92 */
93
94static int netxbig_v2_red_mled[NETXBIG_LED_MODE_NUM] = {
95 [NETXBIG_LED_OFF] = 0,
96 [NETXBIG_LED_ON] = 2,
97 [NETXBIG_LED_SATA] = NETXBIG_LED_INVALID_MODE,
98 [NETXBIG_LED_TIMER1] = 4,
99 [NETXBIG_LED_TIMER2] = NETXBIG_LED_INVALID_MODE,
100};
101
102static int netxbig_v2_blue_pwr_mled[NETXBIG_LED_MODE_NUM] = {
103 [NETXBIG_LED_OFF] = 0,
104 [NETXBIG_LED_ON] = 1,
105 [NETXBIG_LED_SATA] = NETXBIG_LED_INVALID_MODE,
106 [NETXBIG_LED_TIMER1] = 3,
107 [NETXBIG_LED_TIMER2] = 7,
108};
109
110static int netxbig_v2_blue_sata_mled[NETXBIG_LED_MODE_NUM] = {
111 [NETXBIG_LED_OFF] = 0,
112 [NETXBIG_LED_ON] = 7,
113 [NETXBIG_LED_SATA] = 1,
114 [NETXBIG_LED_TIMER1] = 3,
115 [NETXBIG_LED_TIMER2] = NETXBIG_LED_INVALID_MODE,
116};
117
118static struct netxbig_led_timer netxbig_v2_led_timer[] = {
119 [0] = {
120 .delay_on = 500,
121 .delay_off = 500,
122 .mode = NETXBIG_LED_TIMER1,
123 },
124 [1] = {
125 .delay_on = 500,
126 .delay_off = 1000,
127 .mode = NETXBIG_LED_TIMER2,
128 },
129};
130
131#define NETXBIG_LED(_name, maddr, mval, baddr) \
132 { .name = _name, \
133 .mode_addr = maddr, \
134 .mode_val = mval, \
135 .bright_addr = baddr }
136
137static struct netxbig_led net2big_v2_leds_ctrl[] = {
138 NETXBIG_LED("net2big-v2:blue:power", 0, netxbig_v2_blue_pwr_mled, 1),
139 NETXBIG_LED("net2big-v2:red:power", 0, netxbig_v2_red_mled, 1),
140 NETXBIG_LED("net2big-v2:blue:sata0", 3, netxbig_v2_blue_sata_mled, 2),
141 NETXBIG_LED("net2big-v2:red:sata0", 3, netxbig_v2_red_mled, 2),
142 NETXBIG_LED("net2big-v2:blue:sata1", 4, netxbig_v2_blue_sata_mled, 2),
143 NETXBIG_LED("net2big-v2:red:sata1", 4, netxbig_v2_red_mled, 2),
144};
145
146static struct netxbig_led_platform_data net2big_v2_leds_data = {
147 .gpio_ext = &netxbig_v2_gpio_ext,
148 .timer = netxbig_v2_led_timer,
149 .num_timer = ARRAY_SIZE(netxbig_v2_led_timer),
150 .leds = net2big_v2_leds_ctrl,
151 .num_leds = ARRAY_SIZE(net2big_v2_leds_ctrl),
152};
153
154static struct netxbig_led net5big_v2_leds_ctrl[] = {
155 NETXBIG_LED("net5big-v2:blue:power", 0, netxbig_v2_blue_pwr_mled, 1),
156 NETXBIG_LED("net5big-v2:red:power", 0, netxbig_v2_red_mled, 1),
157 NETXBIG_LED("net5big-v2:blue:sata0", 3, netxbig_v2_blue_sata_mled, 2),
158 NETXBIG_LED("net5big-v2:red:sata0", 3, netxbig_v2_red_mled, 2),
159 NETXBIG_LED("net5big-v2:blue:sata1", 4, netxbig_v2_blue_sata_mled, 2),
160 NETXBIG_LED("net5big-v2:red:sata1", 4, netxbig_v2_red_mled, 2),
161 NETXBIG_LED("net5big-v2:blue:sata2", 5, netxbig_v2_blue_sata_mled, 2),
162 NETXBIG_LED("net5big-v2:red:sata2", 5, netxbig_v2_red_mled, 2),
163 NETXBIG_LED("net5big-v2:blue:sata3", 6, netxbig_v2_blue_sata_mled, 2),
164 NETXBIG_LED("net5big-v2:red:sata3", 6, netxbig_v2_red_mled, 2),
165 NETXBIG_LED("net5big-v2:blue:sata4", 7, netxbig_v2_blue_sata_mled, 2),
166 NETXBIG_LED("net5big-v2:red:sata4", 7, netxbig_v2_red_mled, 2),
167};
168
169static struct netxbig_led_platform_data net5big_v2_leds_data = {
170 .gpio_ext = &netxbig_v2_gpio_ext,
171 .timer = netxbig_v2_led_timer,
172 .num_timer = ARRAY_SIZE(netxbig_v2_led_timer),
173 .leds = net5big_v2_leds_ctrl,
174 .num_leds = ARRAY_SIZE(net5big_v2_leds_ctrl),
175};
176
177static struct platform_device netxbig_v2_leds = {
178 .name = "leds-netxbig",
179 .id = -1,
180 .dev = {
181 .platform_data = &net2big_v2_leds_data,
182 },
183};
184
185void __init netxbig_init(void)
186{
187
188 if (of_machine_is_compatible("lacie,net5big_v2"))
189 netxbig_v2_leds.dev.platform_data = &net5big_v2_leds_data;
190 platform_device_register(&netxbig_v2_leds);
191}
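
The deleted board file above encodes the CPLD "GPIO extension bus" described in its comment: a 3-bit register address on GPIOs 47-49, a 3-bit value on GPIOs 44-46, latched via the enable line on GPIO 29. Purely to illustrate that bus, a hypothetical helper (not code from this patch or from the leds-netxbig driver; the latch polarity and timing are assumed) could look like:

#include <linux/gpio.h>
#include <linux/delay.h>

/* Hypothetical: write a 3-bit value to a 3-bit register of the LED CPLD. */
static void cpld_ext_write(const int *addr_gpios, const int *data_gpios,
			   int enable_gpio, unsigned int reg, unsigned int val)
{
	int i;

	for (i = 0; i < 3; i++) {
		gpio_set_value(addr_gpios[i], (reg >> i) & 1);
		gpio_set_value(data_gpios[i], (val >> i) & 1);
	}

	/* Latch address and data into the CPLD (assumed active-high pulse). */
	gpio_set_value(enable_gpio, 1);
	udelay(1);
	gpio_set_value(enable_gpio, 0);
}

With the tables above, cpld_ext_write(netxbig_v2_gpio_ext_addr, netxbig_v2_gpio_ext_data, 29, 0, 1) would select register 0 (front LED) and write mode 1 ("fix blue on").
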
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index b3a0dff67e3f..33d1460a5639 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -49,6 +49,7 @@ config SOC_OMAP5
49 select OMAP_INTERCONNECT 49 select OMAP_INTERCONNECT
50 select OMAP_INTERCONNECT_BARRIER 50 select OMAP_INTERCONNECT_BARRIER
51 select PM_OPP if PM 51 select PM_OPP if PM
52 select ZONE_DMA if ARM_LPAE
52 53
53config SOC_AM33XX 54config SOC_AM33XX
54 bool "TI AM33XX" 55 bool "TI AM33XX"
@@ -78,6 +79,7 @@ config SOC_DRA7XX
78 select OMAP_INTERCONNECT 79 select OMAP_INTERCONNECT
79 select OMAP_INTERCONNECT_BARRIER 80 select OMAP_INTERCONNECT_BARRIER
80 select PM_OPP if PM 81 select PM_OPP if PM
82 select ZONE_DMA if ARM_LPAE
81 83
82config ARCH_OMAP2PLUS 84config ARCH_OMAP2PLUS
83 bool 85 bool
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 6133eaac685d..fb219a30c10c 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -106,6 +106,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
106MACHINE_END 106MACHINE_END
107 107
108static const char *const omap36xx_boards_compat[] __initconst = { 108static const char *const omap36xx_boards_compat[] __initconst = {
109 "ti,omap3630",
109 "ti,omap36xx", 110 "ti,omap36xx",
110 NULL, 111 NULL,
111}; 112};
@@ -243,6 +244,9 @@ static const char *const omap5_boards_compat[] __initconst = {
243}; 244};
244 245
245DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)") 246DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
247#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
248 .dma_zone_size = SZ_2G,
249#endif
246 .reserve = omap_reserve, 250 .reserve = omap_reserve,
247 .smp = smp_ops(omap4_smp_ops), 251 .smp = smp_ops(omap4_smp_ops),
248 .map_io = omap5_map_io, 252 .map_io = omap5_map_io,
@@ -288,6 +292,9 @@ static const char *const dra74x_boards_compat[] __initconst = {
288}; 292};
289 293
290DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)") 294DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
295#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
296 .dma_zone_size = SZ_2G,
297#endif
291 .reserve = omap_reserve, 298 .reserve = omap_reserve,
292 .smp = smp_ops(omap4_smp_ops), 299 .smp = smp_ops(omap4_smp_ops),
293 .map_io = dra7xx_map_io, 300 .map_io = dra7xx_map_io,
@@ -308,6 +315,9 @@ static const char *const dra72x_boards_compat[] __initconst = {
308}; 315};
309 316
310DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)") 317DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
318#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
319 .dma_zone_size = SZ_2G,
320#endif
311 .reserve = omap_reserve, 321 .reserve = omap_reserve,
312 .map_io = dra7xx_map_io, 322 .map_io = dra7xx_map_io,
313 .init_early = dra7xx_init_early, 323 .init_early = dra7xx_init_early,
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index e1d2e991d17a..db7e0bab3587 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -399,40 +399,42 @@ static struct irq_chip wakeupgen_chip = {
399#endif 399#endif
400}; 400};
401 401
402static int wakeupgen_domain_xlate(struct irq_domain *domain, 402static int wakeupgen_domain_translate(struct irq_domain *d,
403 struct device_node *controller, 403 struct irq_fwspec *fwspec,
404 const u32 *intspec, 404 unsigned long *hwirq,
405 unsigned int intsize, 405 unsigned int *type)
406 unsigned long *out_hwirq,
407 unsigned int *out_type)
408{ 406{
409 if (domain->of_node != controller) 407 if (is_of_node(fwspec->fwnode)) {
410 return -EINVAL; /* Shouldn't happen, really... */ 408 if (fwspec->param_count != 3)
411 if (intsize != 3) 409 return -EINVAL;
412 return -EINVAL; /* Not GIC compliant */
413 if (intspec[0] != 0)
414 return -EINVAL; /* No PPI should point to this domain */
415 410
416 *out_hwirq = intspec[1]; 411 /* No PPI should point to this domain */
417 *out_type = intspec[2]; 412 if (fwspec->param[0] != 0)
418 return 0; 413 return -EINVAL;
414
415 *hwirq = fwspec->param[1];
416 *type = fwspec->param[2];
417 return 0;
418 }
419
420 return -EINVAL;
419} 421}
420 422
421static int wakeupgen_domain_alloc(struct irq_domain *domain, 423static int wakeupgen_domain_alloc(struct irq_domain *domain,
422 unsigned int virq, 424 unsigned int virq,
423 unsigned int nr_irqs, void *data) 425 unsigned int nr_irqs, void *data)
424{ 426{
425 struct of_phandle_args *args = data; 427 struct irq_fwspec *fwspec = data;
426 struct of_phandle_args parent_args; 428 struct irq_fwspec parent_fwspec;
427 irq_hw_number_t hwirq; 429 irq_hw_number_t hwirq;
428 int i; 430 int i;
429 431
430 if (args->args_count != 3) 432 if (fwspec->param_count != 3)
431 return -EINVAL; /* Not GIC compliant */ 433 return -EINVAL; /* Not GIC compliant */
432 if (args->args[0] != 0) 434 if (fwspec->param[0] != 0)
433 return -EINVAL; /* No PPI should point to this domain */ 435 return -EINVAL; /* No PPI should point to this domain */
434 436
435 hwirq = args->args[1]; 437 hwirq = fwspec->param[1];
436 if (hwirq >= MAX_IRQS) 438 if (hwirq >= MAX_IRQS)
437 return -EINVAL; /* Can't deal with this */ 439 return -EINVAL; /* Can't deal with this */
438 440
@@ -440,15 +442,16 @@ static int wakeupgen_domain_alloc(struct irq_domain *domain,
440 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 442 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
441 &wakeupgen_chip, NULL); 443 &wakeupgen_chip, NULL);
442 444
443 parent_args = *args; 445 parent_fwspec = *fwspec;
444 parent_args.np = domain->parent->of_node; 446 parent_fwspec.fwnode = domain->parent->fwnode;
445 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); 447 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
448 &parent_fwspec);
446} 449}
447 450
448static const struct irq_domain_ops wakeupgen_domain_ops = { 451static const struct irq_domain_ops wakeupgen_domain_ops = {
449 .xlate = wakeupgen_domain_xlate, 452 .translate = wakeupgen_domain_translate,
450 .alloc = wakeupgen_domain_alloc, 453 .alloc = wakeupgen_domain_alloc,
451 .free = irq_domain_free_irqs_common, 454 .free = irq_domain_free_irqs_common,
452}; 455};
453 456
454/* 457/*
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index ea56397599c2..1dfe34654c43 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -559,7 +559,14 @@ static void pdata_quirks_check(struct pdata_init *quirks)
559 559
560void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) 560void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
561{ 561{
562 omap_sdrc_init(NULL, NULL); 562 /*
563 * We still need this for omap2420 and omap3 PM to work, others are
564 * using drivers/misc/sram.c already.
565 */
566 if (of_machine_is_compatible("ti,omap2420") ||
567 of_machine_is_compatible("ti,omap3"))
568 omap_sdrc_init(NULL, NULL);
569
563 pdata_quirks_check(auxdata_quirks); 570 pdata_quirks_check(auxdata_quirks);
564 of_platform_populate(NULL, omap_dt_match_table, 571 of_platform_populate(NULL, omap_dt_match_table,
565 omap_auxdata_lookup, NULL); 572 omap_auxdata_lookup, NULL);
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 06005d3f2ba3..20ce2d386f17 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -42,10 +42,6 @@
42#define PECR_IS(n) ((1 << ((n) * 2)) << 29) 42#define PECR_IS(n) ((1 << ((n) * 2)) << 29)
43 43
44extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)); 44extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
45#ifdef CONFIG_PM
46
47#define ISRAM_START 0x5c000000
48#define ISRAM_SIZE SZ_256K
49 45
50/* 46/*
51 * NAND NFC: DFI bus arbitration subset 47 * NAND NFC: DFI bus arbitration subset
@@ -54,6 +50,11 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
54#define NDCR_ND_ARB_EN (1 << 12) 50#define NDCR_ND_ARB_EN (1 << 12)
55#define NDCR_ND_ARB_CNTL (1 << 19) 51#define NDCR_ND_ARB_CNTL (1 << 19)
56 52
53#ifdef CONFIG_PM
54
55#define ISRAM_START 0x5c000000
56#define ISRAM_SIZE SZ_256K
57
57static void __iomem *sram; 58static void __iomem *sram;
58static unsigned long wakeup_src; 59static unsigned long wakeup_src;
59 60
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index df7537f12469..c21941349b3e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -419,28 +419,24 @@ config CPU_THUMBONLY
419config CPU_32v3 419config CPU_32v3
420 bool 420 bool
421 select CPU_USE_DOMAINS if MMU 421 select CPU_USE_DOMAINS if MMU
422 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
423 select NEED_KUSER_HELPERS 422 select NEED_KUSER_HELPERS
424 select TLS_REG_EMUL if SMP || !MMU 423 select TLS_REG_EMUL if SMP || !MMU
425 424
426config CPU_32v4 425config CPU_32v4
427 bool 426 bool
428 select CPU_USE_DOMAINS if MMU 427 select CPU_USE_DOMAINS if MMU
429 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
430 select NEED_KUSER_HELPERS 428 select NEED_KUSER_HELPERS
431 select TLS_REG_EMUL if SMP || !MMU 429 select TLS_REG_EMUL if SMP || !MMU
432 430
433config CPU_32v4T 431config CPU_32v4T
434 bool 432 bool
435 select CPU_USE_DOMAINS if MMU 433 select CPU_USE_DOMAINS if MMU
436 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
437 select NEED_KUSER_HELPERS 434 select NEED_KUSER_HELPERS
438 select TLS_REG_EMUL if SMP || !MMU 435 select TLS_REG_EMUL if SMP || !MMU
439 436
440config CPU_32v5 437config CPU_32v5
441 bool 438 bool
442 select CPU_USE_DOMAINS if MMU 439 select CPU_USE_DOMAINS if MMU
443 select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
444 select NEED_KUSER_HELPERS 440 select NEED_KUSER_HELPERS
445 select TLS_REG_EMUL if SMP || !MMU 441 select TLS_REG_EMUL if SMP || !MMU
446 442
@@ -805,14 +801,6 @@ config TLS_REG_EMUL
805 a few prototypes like that in existence) and therefore access to 801 a few prototypes like that in existence) and therefore access to
806 that required register must be emulated. 802 that required register must be emulated.
807 803
808config NEEDS_SYSCALL_FOR_CMPXCHG
809 bool
810 select NEED_KUSER_HELPERS
811 help
812 SMP on a pre-ARMv6 processor? Well OK then.
813 Forget about fast user space cmpxchg support.
814 It is just not possible.
815
816config NEED_KUSER_HELPERS 804config NEED_KUSER_HELPERS
817 bool 805 bool
818 806
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a7815e5421b..ad4eb2d26e16 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1407,12 +1407,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1407 unsigned long uaddr = vma->vm_start; 1407 unsigned long uaddr = vma->vm_start;
1408 unsigned long usize = vma->vm_end - vma->vm_start; 1408 unsigned long usize = vma->vm_end - vma->vm_start;
1409 struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1409 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1410 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1411 unsigned long off = vma->vm_pgoff;
1410 1412
1411 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 1413 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1412 1414
1413 if (!pages) 1415 if (!pages)
1414 return -ENXIO; 1416 return -ENXIO;
1415 1417
1418 if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
1419 return -ENXIO;
1420
1421 pages += off;
1422
1416 do { 1423 do {
1417 int ret = vm_insert_page(vma, uaddr, *pages++); 1424 int ret = vm_insert_page(vma, uaddr, *pages++);
1418 if (ret) { 1425 if (ret) {
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0d629b8f973f..daafcf121ce0 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
593 arm_notify_die("", regs, &info, ifsr, 0); 593 arm_notify_die("", regs, &info, ifsr, 0);
594} 594}
595 595
596/*
597 * Abort handler to be used only during first unmasking of asynchronous aborts
598 * on the boot CPU. This makes sure that the machine will not die if the
599 * firmware/bootloader left an imprecise abort pending for us to trip over.
600 */
601static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
602 struct pt_regs *regs)
603{
604 pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
605 "first unmask, this is most likely caused by a "
606 "firmware/bootloader bug.\n", fsr);
607
608 return 0;
609}
610
611void __init early_abt_enable(void)
612{
613 fsr_info[22].fn = early_abort_handler;
614 local_abt_enable();
615 fsr_info[22].fn = do_bad;
616}
617
596#ifndef CONFIG_ARM_LPAE 618#ifndef CONFIG_ARM_LPAE
597static int __init exceptions_init(void) 619static int __init exceptions_init(void)
598{ 620{
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index cf08bdfbe0d6..05ec5e0df32d 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr)
24 24
25void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); 25void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
26unsigned long search_exception_table(unsigned long addr); 26unsigned long search_exception_table(unsigned long addr);
27void early_abt_enable(void);
27 28
28#endif /* __ARCH_ARM_FAULT_H */ 29#endif /* __ARCH_ARM_FAULT_H */
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 7cd15143a507..4867f5daf82c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -38,6 +38,7 @@
38#include <asm/mach/pci.h> 38#include <asm/mach/pci.h>
39#include <asm/fixmap.h> 39#include <asm/fixmap.h>
40 40
41#include "fault.h"
41#include "mm.h" 42#include "mm.h"
42#include "tcm.h" 43#include "tcm.h"
43 44
@@ -1363,6 +1364,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
1363 */ 1364 */
1364 local_flush_tlb_all(); 1365 local_flush_tlb_all();
1365 flush_cache_all(); 1366 flush_cache_all();
1367
1368 /* Enable asynchronous aborts */
1369 early_abt_enable();
1366} 1370}
1367 1371
1368static void __init kmap_init(void) 1372static void __init kmap_init(void)
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 2235081a04ee..8861c367d061 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -495,7 +495,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
495 495
496 d->netdev = &orion_ge00.dev; 496 d->netdev = &orion_ge00.dev;
497 for (i = 0; i < d->nr_chips; i++) 497 for (i = 0; i < d->nr_chips; i++)
498 d->chip[i].host_dev = &orion_ge00_shared.dev; 498 d->chip[i].host_dev = &orion_ge_mvmdio.dev;
499 orion_switch_device.dev.platform_data = d; 499 orion_switch_device.dev.platform_data = d;
500 500
501 platform_device_register(&orion_switch_device); 501 platform_device_register(&orion_switch_device);
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index aedec81d1198..f6455273b2f8 100644
--- a/arch/arm/vdso/vdsomunge.c
+++ b/arch/arm/vdso/vdsomunge.c
@@ -45,7 +45,6 @@
45 * it does. 45 * it does.
46 */ 46 */
47 47
48#include <byteswap.h>
49#include <elf.h> 48#include <elf.h>
50#include <errno.h> 49#include <errno.h>
51#include <fcntl.h> 50#include <fcntl.h>
@@ -59,6 +58,16 @@
59#include <sys/types.h> 58#include <sys/types.h>
60#include <unistd.h> 59#include <unistd.h>
61 60
61#define swab16(x) \
62 ((((x) & 0x00ff) << 8) | \
63 (((x) & 0xff00) >> 8))
64
65#define swab32(x) \
66 ((((x) & 0x000000ff) << 24) | \
67 (((x) & 0x0000ff00) << 8) | \
68 (((x) & 0x00ff0000) >> 8) | \
69 (((x) & 0xff000000) >> 24))
70
62#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 71#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
63#define HOST_ORDER ELFDATA2LSB 72#define HOST_ORDER ELFDATA2LSB
64#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 73#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
@@ -104,17 +113,17 @@ static void cleanup(void)
104 113
105static Elf32_Word read_elf_word(Elf32_Word word, bool swap) 114static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
106{ 115{
107 return swap ? bswap_32(word) : word; 116 return swap ? swab32(word) : word;
108} 117}
109 118
110static Elf32_Half read_elf_half(Elf32_Half half, bool swap) 119static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
111{ 120{
112 return swap ? bswap_16(half) : half; 121 return swap ? swab16(half) : half;
113} 122}
114 123
115static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap) 124static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
116{ 125{
117 *dst = swap ? bswap_32(val) : val; 126 *dst = swap ? swab32(val) : val;
118} 127}
119 128
120int main(int argc, char **argv) 129int main(int argc, char **argv)
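
The swab16/swab32 macros introduced above replace bswap_16/bswap_32 so the host tool no longer depends on the non-standard <byteswap.h> header. A quick standalone sanity check of the same macro definitions (the test values are chosen here, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define swab16(x) \
	((((x) & 0x00ff) << 8) | \
	 (((x) & 0xff00) >> 8))

#define swab32(x) \
	((((x) & 0x000000ff) << 24) | \
	 (((x) & 0x0000ff00) << 8) | \
	 (((x) & 0x00ff0000) >> 8) | \
	 (((x) & 0xff000000) >> 24))

int main(void)
{
	/* Byte order is reversed in both widths. */
	assert(swab16((uint16_t)0x1234) == 0x3412);
	assert(swab32(0x12345678u) == 0x78563412u);
	return 0;
}
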
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 07d1811aa03f..440d906429de 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -348,6 +348,33 @@ config ARM64_ERRATUM_843419
348 348
349 If unsure, say Y. 349 If unsure, say Y.
350 350
351config CAVIUM_ERRATUM_22375
352 bool "Cavium erratum 22375, 24313"
353 default y
354 help
355 Enable workaround for erratum 22375, 24313.
356
 357	  This implements two gicv3-its errata workarounds for ThunderX. Both
 358	  have a small impact, affecting only ITS table allocation.
359
360 erratum 22375: only alloc 8MB table size
361 erratum 24313: ignore memory access type
362
363 The fixes are in ITS initialization and basically ignore memory access
364 type and table size provided by the TYPER and BASER registers.
365
366 If unsure, say Y.
367
368config CAVIUM_ERRATUM_23154
369 bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
370 default y
371 help
372 The gicv3 of ThunderX requires a modified version for
373 reading the IAR status to ensure data synchronization
374 (access to icc_iar1_el1 is not sync'ed before and after).
375
376 If unsure, say Y.
377
351endmenu 378endmenu
352 379
353 380
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index d831bc2ac204..fac1720472f9 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -477,6 +477,16 @@
477 reg = <0x0 0x7c600000 0x0 0x200000>; 477 reg = <0x0 0x7c600000 0x0 0x200000>;
478 pmd-controller = <3>; 478 pmd-controller = <3>;
479 }; 479 };
480
481 edacl3@7e600000 {
482 compatible = "apm,xgene-edac-l3";
483 reg = <0x0 0x7e600000 0x0 0x1000>;
484 };
485
486 edacsoc@7e930000 {
487 compatible = "apm,xgene-edac-soc-v1";
488 reg = <0x0 0x7e930000 0x0 0x1000>;
489 };
480 }; 490 };
481 491
482 pcie0: pcie@1f2b0000 { 492 pcie0: pcie@1f2b0000 {
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 637e046f0e36..3c386680357e 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -61,42 +61,42 @@
61 61
62 button@1 { 62 button@1 {
63 debounce_interval = <50>; 63 debounce_interval = <50>;
64 wakeup = <1>; 64 wakeup-source;
65 linux,code = <116>; 65 linux,code = <116>;
66 label = "POWER"; 66 label = "POWER";
67 gpios = <&iofpga_gpio0 0 0x4>; 67 gpios = <&iofpga_gpio0 0 0x4>;
68 }; 68 };
69 button@2 { 69 button@2 {
70 debounce_interval = <50>; 70 debounce_interval = <50>;
71 wakeup = <1>; 71 wakeup-source;
72 linux,code = <102>; 72 linux,code = <102>;
73 label = "HOME"; 73 label = "HOME";
74 gpios = <&iofpga_gpio0 1 0x4>; 74 gpios = <&iofpga_gpio0 1 0x4>;
75 }; 75 };
76 button@3 { 76 button@3 {
77 debounce_interval = <50>; 77 debounce_interval = <50>;
78 wakeup = <1>; 78 wakeup-source;
79 linux,code = <152>; 79 linux,code = <152>;
80 label = "RLOCK"; 80 label = "RLOCK";
81 gpios = <&iofpga_gpio0 2 0x4>; 81 gpios = <&iofpga_gpio0 2 0x4>;
82 }; 82 };
83 button@4 { 83 button@4 {
84 debounce_interval = <50>; 84 debounce_interval = <50>;
85 wakeup = <1>; 85 wakeup-source;
86 linux,code = <115>; 86 linux,code = <115>;
87 label = "VOL+"; 87 label = "VOL+";
88 gpios = <&iofpga_gpio0 3 0x4>; 88 gpios = <&iofpga_gpio0 3 0x4>;
89 }; 89 };
90 button@5 { 90 button@5 {
91 debounce_interval = <50>; 91 debounce_interval = <50>;
92 wakeup = <1>; 92 wakeup-source;
93 linux,code = <114>; 93 linux,code = <114>;
94 label = "VOL-"; 94 label = "VOL-";
95 gpios = <&iofpga_gpio0 4 0x4>; 95 gpios = <&iofpga_gpio0 4 0x4>;
96 }; 96 };
97 button@6 { 97 button@6 {
98 debounce_interval = <50>; 98 debounce_interval = <50>;
99 wakeup = <1>; 99 wakeup-source;
100 linux,code = <99>; 100 linux,code = <99>;
101 label = "NMI"; 101 label = "NMI";
102 gpios = <&iofpga_gpio0 5 0x4>; 102 gpios = <&iofpga_gpio0 5 0x4>;
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 208cec08a74f..5f8a38dee274 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -92,4 +92,9 @@ static inline const char *acpi_get_enable_method(int cpu)
92{ 92{
93 return acpi_psci_present() ? "psci" : NULL; 93 return acpi_psci_present() ? "psci" : NULL;
94} 94}
95
96#ifdef CONFIG_ACPI_APEI
97pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
98#endif
99
95#endif /*_ASM_ACPI_H*/ 100#endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
new file mode 100644
index 000000000000..030cdcb46c6b
--- /dev/null
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -0,0 +1,170 @@
1/*
2 * arch/arm64/include/asm/arch_gicv3.h
3 *
4 * Copyright (C) 2015 ARM Ltd.
5 *
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_ARCH_GICV3_H
19#define __ASM_ARCH_GICV3_H
20
21#include <asm/sysreg.h>
22
23#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
24#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
25#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
26#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
27#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
28#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
29#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
30#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
31
32#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
33
34/*
35 * System register definitions
36 */
37#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
38#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
39#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
40#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
41#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
42#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
43#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
44
45#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
46#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
47
48#define ICH_LR0_EL2 __LR0_EL2(0)
49#define ICH_LR1_EL2 __LR0_EL2(1)
50#define ICH_LR2_EL2 __LR0_EL2(2)
51#define ICH_LR3_EL2 __LR0_EL2(3)
52#define ICH_LR4_EL2 __LR0_EL2(4)
53#define ICH_LR5_EL2 __LR0_EL2(5)
54#define ICH_LR6_EL2 __LR0_EL2(6)
55#define ICH_LR7_EL2 __LR0_EL2(7)
56#define ICH_LR8_EL2 __LR8_EL2(0)
57#define ICH_LR9_EL2 __LR8_EL2(1)
58#define ICH_LR10_EL2 __LR8_EL2(2)
59#define ICH_LR11_EL2 __LR8_EL2(3)
60#define ICH_LR12_EL2 __LR8_EL2(4)
61#define ICH_LR13_EL2 __LR8_EL2(5)
62#define ICH_LR14_EL2 __LR8_EL2(6)
63#define ICH_LR15_EL2 __LR8_EL2(7)
64
65#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
66#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
67#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
68#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
69#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
70
71#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
72#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
73#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
74#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
75#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
76
77#ifndef __ASSEMBLY__
78
79#include <linux/stringify.h>
80
81/*
82 * Low-level accessors
83 *
84 * These system registers are 32 bits, but we make sure that the compiler
85 * sets the GP register's most significant bits to 0 with an explicit cast.
86 */
87
88static inline void gic_write_eoir(u32 irq)
89{
90 asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq));
91 isb();
92}
93
94static inline void gic_write_dir(u32 irq)
95{
96 asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq));
97 isb();
98}
99
100static inline u64 gic_read_iar_common(void)
101{
102 u64 irqstat;
103
104 asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
105 return irqstat;
106}
107
108/*
109 * Cavium ThunderX erratum 23154
110 *
111 * The gicv3 of ThunderX requires a modified version for reading the
112 * IAR status to ensure data synchronization (access to icc_iar1_el1
113 * is not sync'ed before and after).
114 */
115static inline u64 gic_read_iar_cavium_thunderx(void)
116{
117 u64 irqstat;
118
119 asm volatile(
120 "nop;nop;nop;nop\n\t"
121 "nop;nop;nop;nop\n\t"
122 "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
123 "nop;nop;nop;nop"
124 : "=r" (irqstat));
125 mb();
126
127 return irqstat;
128}
129
130static inline void gic_write_pmr(u32 val)
131{
132 asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
133}
134
135static inline void gic_write_ctlr(u32 val)
136{
137 asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val));
138 isb();
139}
140
141static inline void gic_write_grpen1(u32 val)
142{
143 asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val));
144 isb();
145}
146
147static inline void gic_write_sgi1r(u64 val)
148{
149 asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
150}
151
152static inline u32 gic_read_sre(void)
153{
154 u64 val;
155
156 asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
157 return val;
158}
159
160static inline void gic_write_sre(u32 val)
161{
162 asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val));
163 isb();
164}
165
166#define gic_read_typer(c) readq_relaxed(c)
167#define gic_write_irouter(v, c) writeq_relaxed(v, c)
168
169#endif /* __ASSEMBLY__ */
170#endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 35a67783cfa0..1e247ac2601a 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,7 +54,7 @@
54#define ATOMIC_INIT(i) { (i) } 54#define ATOMIC_INIT(i) { (i) }
55 55
56#define atomic_read(v) READ_ONCE((v)->counter) 56#define atomic_read(v) READ_ONCE((v)->counter)
57#define atomic_set(v, i) (((v)->counter) = (i)) 57#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
58#define atomic_xchg(v, new) xchg(&((v)->counter), (new)) 58#define atomic_xchg(v, new) xchg(&((v)->counter), (new))
59#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new)) 59#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
60 60
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 171570702bb8..dbc78d2b8cc6 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -27,8 +27,9 @@
27#define ARM64_HAS_SYSREG_GIC_CPUIF 3 27#define ARM64_HAS_SYSREG_GIC_CPUIF 3
28#define ARM64_HAS_PAN 4 28#define ARM64_HAS_PAN 4
29#define ARM64_HAS_LSE_ATOMICS 5 29#define ARM64_HAS_LSE_ATOMICS 5
30#define ARM64_WORKAROUND_CAVIUM_23154 6
30 31
31#define ARM64_NCAPS 6 32#define ARM64_NCAPS 7
32 33
33#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
34 35
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index ee6403df9fe4..100a3d1b17c8 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -62,15 +62,18 @@
62 (0xf << MIDR_ARCHITECTURE_SHIFT) | \ 62 (0xf << MIDR_ARCHITECTURE_SHIFT) | \
63 ((partnum) << MIDR_PARTNUM_SHIFT)) 63 ((partnum) << MIDR_PARTNUM_SHIFT))
64 64
65#define ARM_CPU_IMP_ARM 0x41 65#define ARM_CPU_IMP_ARM 0x41
66#define ARM_CPU_IMP_APM 0x50 66#define ARM_CPU_IMP_APM 0x50
67#define ARM_CPU_IMP_CAVIUM 0x43
67 68
68#define ARM_CPU_PART_AEM_V8 0xD0F 69#define ARM_CPU_PART_AEM_V8 0xD0F
69#define ARM_CPU_PART_FOUNDATION 0xD00 70#define ARM_CPU_PART_FOUNDATION 0xD00
70#define ARM_CPU_PART_CORTEX_A57 0xD07 71#define ARM_CPU_PART_CORTEX_A57 0xD07
71#define ARM_CPU_PART_CORTEX_A53 0xD03 72#define ARM_CPU_PART_CORTEX_A53 0xD03
72 73
73#define APM_CPU_PART_POTENZA 0x000 74#define APM_CPU_PART_POTENZA 0x000
75
76#define CAVIUM_CPU_PART_THUNDERX 0x0A1
74 77
75#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 78#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
76#define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT) 79#define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6b4c3ad75a2a..67027c611dbd 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -94,6 +94,7 @@
94#define MT_DEVICE_GRE 2 94#define MT_DEVICE_GRE 2
95#define MT_NORMAL_NC 3 95#define MT_NORMAL_NC 3
96#define MT_NORMAL 4 96#define MT_NORMAL 4
97#define MT_NORMAL_WT 5
97 98
98/* 99/*
99 * Memory types for Stage-2 translation 100 * Memory types for Stage-2 translation
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 26b066690593..571ca0ed4f05 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -60,8 +60,10 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
60#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) 60#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
61#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) 61#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
62 62
63#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
63#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) 64#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
64#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) 65#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
66#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
65#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) 67#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
66 68
67#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) 69#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 19de7537e7d3..137d537ddceb 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -29,6 +29,11 @@
29#include <asm/cpu_ops.h> 29#include <asm/cpu_ops.h>
30#include <asm/smp_plat.h> 30#include <asm/smp_plat.h>
31 31
32#ifdef CONFIG_ACPI_APEI
33# include <linux/efi.h>
34# include <asm/pgtable.h>
35#endif
36
32int acpi_noirq = 1; /* skip ACPI IRQ initialization */ 37int acpi_noirq = 1; /* skip ACPI IRQ initialization */
33int acpi_disabled = 1; 38int acpi_disabled = 1;
34EXPORT_SYMBOL(acpi_disabled); 39EXPORT_SYMBOL(acpi_disabled);
@@ -230,3 +235,27 @@ void __init acpi_gic_init(void)
230 235
231 early_acpi_os_unmap_memory((char *)table, tbl_size); 236 early_acpi_os_unmap_memory((char *)table, tbl_size);
232} 237}
238
239#ifdef CONFIG_ACPI_APEI
240pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
241{
242 /*
243 * According to "Table 8 Map: EFI memory types to AArch64 memory
244 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
245 * mapped to a corresponding MAIR attribute encoding.
246 * The EFI memory attribute advises all possible capabilities
247 * of a memory region. We use the most efficient capability.
248 */
249
250 u64 attr;
251
252 attr = efi_mem_attributes(addr);
253 if (attr & EFI_MEMORY_WB)
254 return PAGE_KERNEL;
255 if (attr & EFI_MEMORY_WT)
256 return __pgprot(PROT_NORMAL_WT);
257 if (attr & EFI_MEMORY_WC)
258 return __pgprot(PROT_NORMAL_NC);
259 return __pgprot(PROT_DEVICE_nGnRnE);
260}
261#endif
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index bcee7abac68e..937f5e58a4d3 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -284,21 +284,23 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
284 __asm__ __volatile__( \ 284 __asm__ __volatile__( \
285 ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ 285 ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
286 CONFIG_ARM64_PAN) \ 286 CONFIG_ARM64_PAN) \
287 " mov %w2, %w1\n" \ 287 "0: ldxr"B" %w2, [%3]\n" \
288 "0: ldxr"B" %w1, [%3]\n" \ 288 "1: stxr"B" %w0, %w1, [%3]\n" \
289 "1: stxr"B" %w0, %w2, [%3]\n" \
290 " cbz %w0, 2f\n" \ 289 " cbz %w0, 2f\n" \
291 " mov %w0, %w4\n" \ 290 " mov %w0, %w4\n" \
291 " b 3f\n" \
292 "2:\n" \ 292 "2:\n" \
293 " mov %w1, %w2\n" \
294 "3:\n" \
293 " .pushsection .fixup,\"ax\"\n" \ 295 " .pushsection .fixup,\"ax\"\n" \
294 " .align 2\n" \ 296 " .align 2\n" \
295 "3: mov %w0, %w5\n" \ 297 "4: mov %w0, %w5\n" \
296 " b 2b\n" \ 298 " b 3b\n" \
297 " .popsection" \ 299 " .popsection" \
298 " .pushsection __ex_table,\"a\"\n" \ 300 " .pushsection __ex_table,\"a\"\n" \
299 " .align 3\n" \ 301 " .align 3\n" \
300 " .quad 0b, 3b\n" \ 302 " .quad 0b, 4b\n" \
301 " .quad 1b, 3b\n" \ 303 " .quad 1b, 4b\n" \
302 " .popsection\n" \ 304 " .popsection\n" \
303 ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ 305 ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
304 CONFIG_ARM64_PAN) \ 306 CONFIG_ARM64_PAN) \
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6ffd91438560..574450c257a4 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -23,6 +23,7 @@
23 23
24#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) 24#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
25#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) 25#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
26#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
26 27
27#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ 28#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
28 MIDR_ARCHITECTURE_MASK) 29 MIDR_ARCHITECTURE_MASK)
@@ -82,6 +83,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
82 MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), 83 MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
83 }, 84 },
84#endif 85#endif
86#ifdef CONFIG_CAVIUM_ERRATUM_23154
87 {
88 /* Cavium ThunderX, pass 1.x */
89 .desc = "Cavium erratum 23154",
90 .capability = ARM64_WORKAROUND_CAVIUM_23154,
91 MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
92 },
93#endif
85 { 94 {
86 } 95 }
87}; 96};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3c9aed32f70b..305f30dc9e63 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -23,6 +23,8 @@
23#include <asm/cpufeature.h> 23#include <asm/cpufeature.h>
24#include <asm/processor.h> 24#include <asm/processor.h>
25 25
26#include <linux/irqchip/arm-gic-v3.h>
27
26static bool 28static bool
27feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) 29feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
28{ 30{
@@ -45,11 +47,26 @@ __ID_FEAT_CHK(id_aa64pfr0);
45__ID_FEAT_CHK(id_aa64mmfr1); 47__ID_FEAT_CHK(id_aa64mmfr1);
46__ID_FEAT_CHK(id_aa64isar0); 48__ID_FEAT_CHK(id_aa64isar0);
47 49
50static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
51{
52 bool has_sre;
53
54 if (!has_id_aa64pfr0_feature(entry))
55 return false;
56
57 has_sre = gic_enable_sre();
58 if (!has_sre)
59 pr_warn_once("%s present but disabled by higher exception level\n",
60 entry->desc);
61
62 return has_sre;
63}
64
48static const struct arm64_cpu_capabilities arm64_features[] = { 65static const struct arm64_cpu_capabilities arm64_features[] = {
49 { 66 {
50 .desc = "GIC system register CPU interface", 67 .desc = "GIC system register CPU interface",
51 .capability = ARM64_HAS_SYSREG_GIC_CPUIF, 68 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
52 .matches = has_id_aa64pfr0_feature, 69 .matches = has_useable_gicv3_cpuif,
53 .field_pos = 24, 70 .field_pos = 24,
54 .min_field_value = 1, 71 .min_field_value = 1,
55 }, 72 },
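
has_useable_gicv3_cpuif() above only reports the GICv3 system-register CPU interface as usable when gic_enable_sre() manages to set ICC_SRE_EL1.SRE, i.e. when a higher exception level has not locked it off. For reference, that generic helper is roughly the following (reconstructed from memory of include/linux/irqchip/arm-gic-v3.h, not part of this diff):

static inline bool gic_enable_sre(void)
{
	u32 val;

	val = gic_read_sre();
	if (val & ICC_SRE_EL1_SRE)
		return true;

	/* Try to enable system-register access and check that it sticks. */
	val |= ICC_SRE_EL1_SRE;
	gic_write_sre(val);
	val = gic_read_sre();

	return !!(val & ICC_SRE_EL1_SRE);
}
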
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 816120ece6bc..78dfbd34b6bf 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -25,10 +25,20 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
25 unsigned long kernel_size, kernel_memsize = 0; 25 unsigned long kernel_size, kernel_memsize = 0;
26 unsigned long nr_pages; 26 unsigned long nr_pages;
27 void *old_image_addr = (void *)*image_addr; 27 void *old_image_addr = (void *)*image_addr;
28 unsigned long preferred_offset;
29
30 /*
31 * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
32 * a 2 MB aligned base, which itself may be lower than dram_base, as
33 * long as the resulting offset equals or exceeds it.
34 */
35 preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
36 if (preferred_offset < dram_base)
37 preferred_offset += SZ_2M;
28 38
29 /* Relocate the image, if required. */ 39 /* Relocate the image, if required. */
30 kernel_size = _edata - _text; 40 kernel_size = _edata - _text;
31 if (*image_addr != (dram_base + TEXT_OFFSET)) { 41 if (*image_addr != preferred_offset) {
32 kernel_memsize = kernel_size + (_end - _edata); 42 kernel_memsize = kernel_size + (_end - _edata);
33 43
34 /* 44 /*
@@ -42,7 +52,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
42 * Mustang), we can still place the kernel at the address 52 * Mustang), we can still place the kernel at the address
43 * 'dram_base + TEXT_OFFSET'. 53 * 'dram_base + TEXT_OFFSET'.
44 */ 54 */
45 *image_addr = *reserve_addr = dram_base + TEXT_OFFSET; 55 *image_addr = *reserve_addr = preferred_offset;
46 nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) / 56 nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
47 EFI_PAGE_SIZE; 57 EFI_PAGE_SIZE;
48 status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, 58 status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
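
To make the preferred_offset computation above concrete (assuming the default arm64 TEXT_OFFSET of 0x80000): with dram_base = 0x80000000 the 2 MB round-down leaves the base unchanged and preferred_offset = 0x80080000, which already lies above dram_base. With an unaligned dram_base = 0x80100000, round_down() yields 0x80000000, and 0x80000000 + 0x80000 = 0x80080000 falls below dram_base, so 2 MB is added and the Image is placed at 0x80280000 instead of at dram_base + TEXT_OFFSET.
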
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 13671a9cf016..61eb1d17586a 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -51,15 +51,6 @@ static struct mm_struct efi_mm = {
51 INIT_MM_CONTEXT(efi_mm) 51 INIT_MM_CONTEXT(efi_mm)
52}; 52};
53 53
54static int uefi_debug __initdata;
55static int __init uefi_debug_setup(char *str)
56{
57 uefi_debug = 1;
58
59 return 0;
60}
61early_param("uefi_debug", uefi_debug_setup);
62
63static int __init is_normal_ram(efi_memory_desc_t *md) 54static int __init is_normal_ram(efi_memory_desc_t *md)
64{ 55{
65 if (md->attribute & EFI_MEMORY_WB) 56 if (md->attribute & EFI_MEMORY_WB)
@@ -171,14 +162,14 @@ static __init void reserve_regions(void)
171 efi_memory_desc_t *md; 162 efi_memory_desc_t *md;
172 u64 paddr, npages, size; 163 u64 paddr, npages, size;
173 164
174 if (uefi_debug) 165 if (efi_enabled(EFI_DBG))
175 pr_info("Processing EFI memory map:\n"); 166 pr_info("Processing EFI memory map:\n");
176 167
177 for_each_efi_memory_desc(&memmap, md) { 168 for_each_efi_memory_desc(&memmap, md) {
178 paddr = md->phys_addr; 169 paddr = md->phys_addr;
179 npages = md->num_pages; 170 npages = md->num_pages;
180 171
181 if (uefi_debug) { 172 if (efi_enabled(EFI_DBG)) {
182 char buf[64]; 173 char buf[64];
183 174
184 pr_info(" 0x%012llx-0x%012llx %s", 175 pr_info(" 0x%012llx-0x%012llx %s",
@@ -194,11 +185,11 @@ static __init void reserve_regions(void)
194 185
195 if (is_reserve_region(md)) { 186 if (is_reserve_region(md)) {
196 memblock_reserve(paddr, size); 187 memblock_reserve(paddr, size);
197 if (uefi_debug) 188 if (efi_enabled(EFI_DBG))
198 pr_cont("*"); 189 pr_cont("*");
199 } 190 }
200 191
201 if (uefi_debug) 192 if (efi_enabled(EFI_DBG))
202 pr_cont("\n"); 193 pr_cont("\n");
203 } 194 }
204 195
@@ -210,14 +201,14 @@ void __init efi_init(void)
210 struct efi_fdt_params params; 201 struct efi_fdt_params params;
211 202
212 /* Grab UEFI information placed in FDT by stub */ 203 /* Grab UEFI information placed in FDT by stub */
213 if (!efi_get_fdt_params(&params, uefi_debug)) 204 if (!efi_get_fdt_params(&params))
214 return; 205 return;
215 206
216 efi_system_table = params.system_table; 207 efi_system_table = params.system_table;
217 208
218 memblock_reserve(params.mmap & PAGE_MASK, 209 memblock_reserve(params.mmap & PAGE_MASK,
219 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); 210 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
220 memmap.phys_map = (void *)params.mmap; 211 memmap.phys_map = params.mmap;
221 memmap.map = early_memremap(params.mmap, params.mmap_size); 212 memmap.map = early_memremap(params.mmap, params.mmap_size);
222 memmap.map_end = memmap.map + params.mmap_size; 213 memmap.map_end = memmap.map + params.mmap_size;
223 memmap.desc_size = params.desc_size; 214 memmap.desc_size = params.desc_size;
@@ -291,7 +282,7 @@ static int __init arm64_enable_runtime_services(void)
291 pr_info("Remapping and enabling EFI services.\n"); 282 pr_info("Remapping and enabling EFI services.\n");
292 283
293 mapsize = memmap.map_end - memmap.map; 284 mapsize = memmap.map_end - memmap.map;
294 memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, 285 memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
295 mapsize); 286 mapsize);
296 if (!memmap.map) { 287 if (!memmap.map) {
297 pr_err("Failed to remap EFI memory map\n"); 288 pr_err("Failed to remap EFI memory map\n");
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 90d09eddd5b2..351a4de1b1e2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -498,6 +498,8 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
498 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 498 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
499 msr_s ICC_SRE_EL2, x0 499 msr_s ICC_SRE_EL2, x0
500 isb // Make sure SRE is now set 500 isb // Make sure SRE is now set
501 mrs_s x0, ICC_SRE_EL2 // Read SRE back,
502 tbz x0, #0, 3f // and check that it sticks
501 msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults 503 msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
502 504
5033: 5053:
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 407991bf79f5..ccb6078ed9f2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
48 48
49 frame->sp = fp + 0x10; 49 frame->sp = fp + 0x10;
50 frame->fp = *(unsigned long *)(fp); 50 frame->fp = *(unsigned long *)(fp);
51 /* 51 frame->pc = *(unsigned long *)(fp + 8);
52 * -4 here because we care about the PC at time of bl,
53 * not where the return will go.
54 */
55 frame->pc = *(unsigned long *)(fp + 8) - 4;
56 52
57 return 0; 53 return 0;
58} 54}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 8297d502217e..44ca4143b013 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
80 if (ret == 0) { 80 if (ret == 0) {
81 /* 81 /*
82 * We are resuming from reset with TTBR0_EL1 set to the 82 * We are resuming from reset with TTBR0_EL1 set to the
83 * idmap to enable the MMU; restore the active_mm mappings in 83 * idmap to enable the MMU; set the TTBR0 to the reserved
84 * TTBR0_EL1 unless the active_mm == &init_mm, in which case 84 * page tables to prevent speculative TLB allocations, flush
85 * the thread entered cpu_suspend with TTBR0_EL1 set to 85 * the local tlb and set the default tcr_el1.t0sz so that
86 * reserved TTBR0 page tables and should be restored as such. 86 * the TTBR0 address space set-up is properly restored.
87 * If the current active_mm != &init_mm we entered cpu_suspend
88 * with mappings in TTBR0 that must be restored, so we switch
89 * them back to complete the address space configuration
90 * restoration before returning.
87 */ 91 */
88 if (mm == &init_mm) 92 cpu_set_reserved_ttbr0();
89 cpu_set_reserved_ttbr0();
90 else
91 cpu_switch_mm(mm->pgd, mm);
92
93 flush_tlb_all(); 93 flush_tlb_all();
94 cpu_set_default_tcr_t0sz();
95
96 if (mm != &init_mm)
97 cpu_switch_mm(mm->pgd, mm);
94 98
95 /* 99 /*
96 * Restore per-cpu offset before any kernel 100 * Restore per-cpu offset before any kernel
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 5c7e920e4861..ff5292c6277c 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -16,6 +16,9 @@ menuconfig VIRTUALIZATION
16 16
17if VIRTUALIZATION 17if VIRTUALIZATION
18 18
19config KVM_ARM_VGIC_V3
20 bool
21
19config KVM 22config KVM
20 bool "Kernel-based Virtual Machine (KVM) support" 23 bool "Kernel-based Virtual Machine (KVM) support"
21 depends on OF 24 depends on OF
@@ -31,6 +34,7 @@ config KVM
31 select KVM_VFIO 34 select KVM_VFIO
32 select HAVE_KVM_EVENTFD 35 select HAVE_KVM_EVENTFD
33 select HAVE_KVM_IRQFD 36 select HAVE_KVM_IRQFD
37 select KVM_ARM_VGIC_V3
34 ---help--- 38 ---help---
35 Support hosting virtualized guest machines. 39 Support hosting virtualized guest machines.
36 40
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index e4ee7bd8830a..7783ff05f74c 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -163,12 +163,14 @@ ENTRY(__cpu_setup)
163 * DEVICE_GRE 010 00001100 163 * DEVICE_GRE 010 00001100
164 * NORMAL_NC 011 01000100 164 * NORMAL_NC 011 01000100
165 * NORMAL 100 11111111 165 * NORMAL 100 11111111
166 * NORMAL_WT 101 10111011
166 */ 167 */
167 ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \ 168 ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
168 MAIR(0x04, MT_DEVICE_nGnRE) | \ 169 MAIR(0x04, MT_DEVICE_nGnRE) | \
169 MAIR(0x0c, MT_DEVICE_GRE) | \ 170 MAIR(0x0c, MT_DEVICE_GRE) | \
170 MAIR(0x44, MT_NORMAL_NC) | \ 171 MAIR(0x44, MT_NORMAL_NC) | \
171 MAIR(0xff, MT_NORMAL) 172 MAIR(0xff, MT_NORMAL) | \
173 MAIR(0xbb, MT_NORMAL_WT)
172 msr mair_el1, x5 174 msr mair_el1, x5
173 /* 175 /*
174 * Prepare SCTLR 176 * Prepare SCTLR
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index 91146b416cdb..99b0a7984950 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -21,7 +21,6 @@
21#include <linux/leds_pwm.h> 21#include <linux/leds_pwm.h>
22#include <linux/input.h> 22#include <linux/input.h>
23#include <linux/gpio_keys.h> 23#include <linux/gpio_keys.h>
24#include <linux/atmel_serial.h>
25#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
26#include <linux/spi/ads7846.h> 25#include <linux/spi/ads7846.h>
27 26
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 97c9bdf83409..d74fd8ce980a 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -19,8 +19,8 @@
19 19
20#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
21 21
22#define atomic_read(v) ACCESS_ONCE((v)->counter) 22#define atomic_read(v) READ_ONCE((v)->counter)
23#define atomic_set(v, i) (((v)->counter) = i) 23#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
24 24
25#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \ 25#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
26static inline int __atomic_##op##_return(int i, atomic_t *v) \ 26static inline int __atomic_##op##_return(int i, atomic_t *v) \
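The avr32 change swaps ACCESS_ONCE() for READ_ONCE()/WRITE_ONCE(), which keep the same single-access, no-tearing guarantee while also being usable on non-scalar types. A generic sketch of what the new accessors look like in use (illustrative, not avr32-specific):

#include <linux/compiler.h>
#include <linux/types.h>	/* atomic_t: the usual { int counter; } wrapper */

static inline int example_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);	/* exactly one, non-torn load */
}

static inline void example_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);	/* exactly one, non-torn store */
}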
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index ddcb45d7dfa7..43afc03e4125 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -178,7 +178,7 @@ static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
178static void __init parse_priority_map(struct megamod_pic *pic, 178static void __init parse_priority_map(struct megamod_pic *pic,
179 int *mapping, int size) 179 int *mapping, int size)
180{ 180{
181 struct device_node *np = pic->irqhost->of_node; 181 struct device_node *np = irq_domain_get_of_node(pic->irqhost);
182 const __be32 *map; 182 const __be32 *map;
183 int i, maplen; 183 int i, maplen;
184 u32 val; 184 u32 val;
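The megamod-pic hunk replaces a direct pic->irqhost->of_node dereference with the irq_domain_get_of_node() accessor, so callers no longer depend on the struct irq_domain layout. Sketch of the call pattern (the wrapper name is illustrative):

#include <linux/irqdomain.h>
#include <linux/of.h>

/* Resolve the device-tree node behind an irq_domain without touching
 * its fields directly. */
static struct device_node *pic_of_node(struct irq_domain *host)
{
	return irq_domain_get_of_node(host);
}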
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 8da5653bd895..e086f9e93728 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -57,7 +57,6 @@ config CRIS
57 select ARCH_WANT_IPC_PARSE_VERSION 57 select ARCH_WANT_IPC_PARSE_VERSION
58 select GENERIC_IRQ_SHOW 58 select GENERIC_IRQ_SHOW
59 select GENERIC_IOMAP 59 select GENERIC_IOMAP
60 select GENERIC_CMOS_UPDATE
61 select MODULES_USE_ELF_RELA 60 select MODULES_USE_ELF_RELA
62 select CLONE_BACKWARDS2 61 select CLONE_BACKWARDS2
63 select OLD_SIGSUSPEND 62 select OLD_SIGSUSPEND
diff --git a/arch/cris/arch-v10/kernel/head.S b/arch/cris/arch-v10/kernel/head.S
index 4a146e1749c9..a4877a421756 100644
--- a/arch/cris/arch-v10/kernel/head.S
+++ b/arch/cris/arch-v10/kernel/head.S
@@ -354,63 +354,6 @@ no_command_line:
354 blo 1b 354 blo 1b
355 nop 355 nop
356 356
357#ifdef CONFIG_BLK_DEV_ETRAXIDE
358 ;; disable ATA before enabling it in genconfig below
359 moveq 0,$r0
360 move.d $r0,[R_ATA_CTRL_DATA]
361 move.d $r0,[R_ATA_TRANSFER_CNT]
362 move.d $r0,[R_ATA_CONFIG]
363#if 0
364 move.d R_PORT_G_DATA, $r1
365 move.d $r0, [$r1]; assert ATA bus-reset
366 nop
367 nop
368 nop
369 nop
370 nop
371 nop
372 move.d 0x08000000,$r0
373 move.d $r0,[$r1]
374#endif
375#endif
376
377#ifdef CONFIG_JULIETTE
378 ;; configure external DMA channel 0 before enabling it in genconfig
379
380 moveq 0,$r0
381 move.d $r0,[R_EXT_DMA_0_ADDR]
382 ; cnt enable, word size, output, stop, size 0
383 move.d IO_STATE (R_EXT_DMA_0_CMD, cnt, enable) \
384 | IO_STATE (R_EXT_DMA_0_CMD, rqpol, ahigh) \
385 | IO_STATE (R_EXT_DMA_0_CMD, apol, ahigh) \
386 | IO_STATE (R_EXT_DMA_0_CMD, rq_ack, burst) \
387 | IO_STATE (R_EXT_DMA_0_CMD, wid, word) \
388 | IO_STATE (R_EXT_DMA_0_CMD, dir, output) \
389 | IO_STATE (R_EXT_DMA_0_CMD, run, stop) \
390 | IO_FIELD (R_EXT_DMA_0_CMD, trf_count, 0),$r0
391 move.d $r0,[R_EXT_DMA_0_CMD]
392
393 ;; reset dma4 and wait for completion
394
395 moveq IO_STATE (R_DMA_CH4_CMD, cmd, reset),$r0
396 move.b $r0,[R_DMA_CH4_CMD]
3971: move.b [R_DMA_CH4_CMD],$r0
398 and.b IO_MASK (R_DMA_CH4_CMD, cmd),$r0
399 cmp.b IO_STATE (R_DMA_CH4_CMD, cmd, reset),$r0
400 beq 1b
401 nop
402
403 ;; reset dma5 and wait for completion
404
405 moveq IO_STATE (R_DMA_CH5_CMD, cmd, reset),$r0
406 move.b $r0,[R_DMA_CH5_CMD]
4071: move.b [R_DMA_CH5_CMD],$r0
408 and.b IO_MASK (R_DMA_CH5_CMD, cmd),$r0
409 cmp.b IO_STATE (R_DMA_CH5_CMD, cmd, reset),$r0
410 beq 1b
411 nop
412#endif
413
414 ;; Etrax product HW genconfig setup 357 ;; Etrax product HW genconfig setup
415 358
416 moveq 0,$r0 359 moveq 0,$r0
@@ -447,21 +390,6 @@ no_command_line:
447 | IO_STATE (R_GEN_CONFIG, dma9, usb),$r0 390 | IO_STATE (R_GEN_CONFIG, dma9, usb),$r0
448 391
449 392
450#if defined(CONFIG_ETRAX_DEF_R_PORT_G0_DIR_OUT)
451 or.d IO_STATE (R_GEN_CONFIG, g0dir, out),$r0
452#endif
453
454#if defined(CONFIG_ETRAX_DEF_R_PORT_G8_15_DIR_OUT)
455 or.d IO_STATE (R_GEN_CONFIG, g8_15dir, out),$r0
456#endif
457#if defined(CONFIG_ETRAX_DEF_R_PORT_G16_23_DIR_OUT)
458 or.d IO_STATE (R_GEN_CONFIG, g16_23dir, out),$r0
459#endif
460
461#if defined(CONFIG_ETRAX_DEF_R_PORT_G24_DIR_OUT)
462 or.d IO_STATE (R_GEN_CONFIG, g24dir, out),$r0
463#endif
464
465 move.d $r0,[genconfig_shadow] ; init a shadow register of R_GEN_CONFIG 393 move.d $r0,[genconfig_shadow] ; init a shadow register of R_GEN_CONFIG
466 394
467 move.d $r0,[R_GEN_CONFIG] 395 move.d $r0,[R_GEN_CONFIG]
@@ -500,19 +428,9 @@ no_command_line:
500 ;; including their shadow registers 428 ;; including their shadow registers
501 429
502 move.b CONFIG_ETRAX_DEF_R_PORT_PA_DIR,$r0 430 move.b CONFIG_ETRAX_DEF_R_PORT_PA_DIR,$r0
503#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PA7)
504 or.b IO_STATE (R_PORT_PA_DIR, dir7, output),$r0
505#endif
506 move.b $r0,[port_pa_dir_shadow] 431 move.b $r0,[port_pa_dir_shadow]
507 move.b $r0,[R_PORT_PA_DIR] 432 move.b $r0,[R_PORT_PA_DIR]
508 move.b CONFIG_ETRAX_DEF_R_PORT_PA_DATA,$r0 433 move.b CONFIG_ETRAX_DEF_R_PORT_PA_DATA,$r0
509#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PA7)
510#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
511 and.b ~(1 << 7),$r0
512#else
513 or.b (1 << 7),$r0
514#endif
515#endif
516 move.b $r0,[port_pa_data_shadow] 434 move.b $r0,[port_pa_data_shadow]
517 move.b $r0,[R_PORT_PA_DATA] 435 move.b $r0,[R_PORT_PA_DATA]
518 436
@@ -520,19 +438,9 @@ no_command_line:
520 move.b $r0,[port_pb_config_shadow] 438 move.b $r0,[port_pb_config_shadow]
521 move.b $r0,[R_PORT_PB_CONFIG] 439 move.b $r0,[R_PORT_PB_CONFIG]
522 move.b CONFIG_ETRAX_DEF_R_PORT_PB_DIR,$r0 440 move.b CONFIG_ETRAX_DEF_R_PORT_PB_DIR,$r0
523#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PB5)
524 or.b IO_STATE (R_PORT_PB_DIR, dir5, output),$r0
525#endif
526 move.b $r0,[port_pb_dir_shadow] 441 move.b $r0,[port_pb_dir_shadow]
527 move.b $r0,[R_PORT_PB_DIR] 442 move.b $r0,[R_PORT_PB_DIR]
528 move.b CONFIG_ETRAX_DEF_R_PORT_PB_DATA,$r0 443 move.b CONFIG_ETRAX_DEF_R_PORT_PB_DATA,$r0
529#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_PB5)
530#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
531 and.b ~(1 << 5),$r0
532#else
533 or.b (1 << 5),$r0
534#endif
535#endif
536 move.b $r0,[port_pb_data_shadow] 444 move.b $r0,[port_pb_data_shadow]
537 move.b $r0,[R_PORT_PB_DATA] 445 move.b $r0,[R_PORT_PB_DATA]
538 446
@@ -541,20 +449,6 @@ no_command_line:
541 move.d $r0, [R_PORT_PB_I2C] 449 move.d $r0, [R_PORT_PB_I2C]
542 450
543 moveq 0,$r0 451 moveq 0,$r0
544#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_G10)
545#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
546 and.d ~(1 << 10),$r0
547#else
548 or.d (1 << 10),$r0
549#endif
550#endif
551#if defined(CONFIG_BLUETOOTH) && defined(CONFIG_BLUETOOTH_RESET_G11)
552#if defined(CONFIG_BLUETOOTH_RESET_ACTIVE_HIGH)
553 and.d ~(1 << 11),$r0
554#else
555 or.d (1 << 11),$r0
556#endif
557#endif
558 move.d $r0,[port_g_data_shadow] 452 move.d $r0,[port_g_data_shadow]
559 move.d $r0,[R_PORT_G_DATA] 453 move.d $r0,[R_PORT_G_DATA]
560 454
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index 22d846bfc570..ed71ade93a73 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -275,7 +275,7 @@ static char remcomOutBuffer[BUFMAX];
275/* Error and warning messages. */ 275/* Error and warning messages. */
276enum error_type 276enum error_type
277{ 277{
278 SUCCESS, E01, E02, E03, E04, E05, E06, E07 278 SUCCESS, E01, E02, E03, E04, E05, E06, E07, E08
279}; 279};
280static char *error_message[] = 280static char *error_message[] =
281{ 281{
@@ -286,7 +286,8 @@ static char *error_message[] =
286 "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.", 286 "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.",
287 "E05 Change register content - P - the register is not implemented..", 287 "E05 Change register content - P - the register is not implemented..",
288 "E06 Change memory content - M - internal error.", 288 "E06 Change memory content - M - internal error.",
289 "E07 Change register content - P - the register is not stored on the stack" 289 "E07 Change register content - P - the register is not stored on the stack",
290 "E08 Invalid parameter"
290}; 291};
291/********************************* Register image ****************************/ 292/********************************* Register image ****************************/
292/* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's 293/* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's
@@ -351,7 +352,7 @@ char internal_stack[INTERNAL_STACK_SIZE];
351 breakpoint to be handled. A static breakpoint uses the content of register 352 breakpoint to be handled. A static breakpoint uses the content of register
352 BRP as it is whereas a dynamic breakpoint requires subtraction with 2 353 BRP as it is whereas a dynamic breakpoint requires subtraction with 2
353 in order to execute the instruction. The first breakpoint is static. */ 354 in order to execute the instruction. The first breakpoint is static. */
354static unsigned char is_dyn_brkp = 0; 355static unsigned char __used is_dyn_brkp;
355 356
356/********************************* String library ****************************/ 357/********************************* String library ****************************/
357/* Single-step over library functions creates trap loops. */ 358/* Single-step over library functions creates trap loops. */
@@ -413,18 +414,6 @@ gdb_cris_strtol (const char *s, char **endptr, int base)
413} 414}
414 415
415/********************************** Packet I/O ******************************/ 416/********************************** Packet I/O ******************************/
416/* Returns the integer equivalent of a hexadecimal character. */
417static int
418hex (char ch)
419{
420 if ((ch >= 'a') && (ch <= 'f'))
421 return (ch - 'a' + 10);
422 if ((ch >= '0') && (ch <= '9'))
423 return (ch - '0');
424 if ((ch >= 'A') && (ch <= 'F'))
425 return (ch - 'A' + 10);
426 return (-1);
427}
428 417
429/* Convert the memory, pointed to by mem into hexadecimal representation. 418/* Convert the memory, pointed to by mem into hexadecimal representation.
430 Put the result in buf, and return a pointer to the last character 419 Put the result in buf, and return a pointer to the last character
@@ -455,22 +444,6 @@ mem2hex(char *buf, unsigned char *mem, int count)
455 return (buf); 444 return (buf);
456} 445}
457 446
458/* Convert the array, in hexadecimal representation, pointed to by buf into
459 binary representation. Put the result in mem, and return a pointer to
460 the character after the last byte written. */
461static unsigned char*
462hex2mem (unsigned char *mem, char *buf, int count)
463{
464 int i;
465 unsigned char ch;
466 for (i = 0; i < count; i++) {
467 ch = hex (*buf++) << 4;
468 ch = ch + hex (*buf++);
469 *mem++ = ch;
470 }
471 return (mem);
472}
473
474/* Put the content of the array, in binary representation, pointed to by buf 447/* Put the content of the array, in binary representation, pointed to by buf
475 into memory pointed to by mem, and return a pointer to the character after 448 into memory pointed to by mem, and return a pointer to the character after
476 the last byte written. 449 the last byte written.
@@ -524,8 +497,8 @@ getpacket (char *buffer)
524 buffer[count] = '\0'; 497 buffer[count] = '\0';
525 498
526 if (ch == '#') { 499 if (ch == '#') {
527 xmitcsum = hex (getDebugChar ()) << 4; 500 xmitcsum = hex_to_bin(getDebugChar()) << 4;
528 xmitcsum += hex (getDebugChar ()); 501 xmitcsum += hex_to_bin(getDebugChar());
529 if (checksum != xmitcsum) { 502 if (checksum != xmitcsum) {
530 /* Wrong checksum */ 503 /* Wrong checksum */
531 putDebugChar ('-'); 504 putDebugChar ('-');
@@ -599,7 +572,7 @@ putDebugString (const unsigned char *str, int length)
599 572
600/********************************* Register image ****************************/ 573/********************************* Register image ****************************/
601/* Write a value to a specified register in the register image of the current 574/* Write a value to a specified register in the register image of the current
602 thread. Returns status code SUCCESS, E02 or E05. */ 575 thread. Returns status code SUCCESS, E02, E05 or E08. */
603static int 576static int
604write_register (int regno, char *val) 577write_register (int regno, char *val)
605{ 578{
@@ -608,8 +581,9 @@ write_register (int regno, char *val)
608 581
609 if (regno >= R0 && regno <= PC) { 582 if (regno >= R0 && regno <= PC) {
610 /* 32-bit register with simple offset. */ 583 /* 32-bit register with simple offset. */
611 hex2mem ((unsigned char *)current_reg + regno * sizeof(unsigned int), 584 if (hex2bin((unsigned char *)current_reg + regno * sizeof(unsigned int),
612 val, sizeof(unsigned int)); 585 val, sizeof(unsigned int)))
586 status = E08;
613 } 587 }
614 else if (regno == P0 || regno == VR || regno == P4 || regno == P8) { 588 else if (regno == P0 || regno == VR || regno == P4 || regno == P8) {
615 /* Do not support read-only registers. */ 589 /* Do not support read-only registers. */
@@ -618,13 +592,15 @@ write_register (int regno, char *val)
618 else if (regno == CCR) { 592 else if (regno == CCR) {
619 /* 16 bit register with complex offset. (P4 is read-only, P6 is not implemented, 593 /* 16 bit register with complex offset. (P4 is read-only, P6 is not implemented,
620 and P7 (MOF) is 32 bits in ETRAX 100LX. */ 594 and P7 (MOF) is 32 bits in ETRAX 100LX. */
621 hex2mem ((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short), 595 if (hex2bin((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short),
622 val, sizeof(unsigned short)); 596 val, sizeof(unsigned short)))
597 status = E08;
623 } 598 }
624 else if (regno >= MOF && regno <= USP) { 599 else if (regno >= MOF && regno <= USP) {
625 /* 32 bit register with complex offset. (P8 has been taken care of.) */ 600 /* 32 bit register with complex offset. (P8 has been taken care of.) */
626 hex2mem ((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int), 601 if (hex2bin((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int),
627 val, sizeof(unsigned int)); 602 val, sizeof(unsigned int)))
603 status = E08;
628 } 604 }
629 else { 605 else {
630 /* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */ 606 /* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
@@ -759,9 +735,11 @@ handle_exception (int sigval)
759 /* Write registers. GXX..XX 735 /* Write registers. GXX..XX
760 Each byte of register data is described by two hex digits. 736 Each byte of register data is described by two hex digits.
761 Success: OK 737 Success: OK
762 Failure: void. */ 738 Failure: E08. */
763 hex2mem((char *)&cris_reg, &remcomInBuffer[1], sizeof(registers)); 739 if (hex2bin((char *)&cris_reg, &remcomInBuffer[1], sizeof(registers)))
764 gdb_cris_strcpy (remcomOutBuffer, "OK"); 740 gdb_cris_strcpy (remcomOutBuffer, error_message[E08]);
741 else
742 gdb_cris_strcpy (remcomOutBuffer, "OK");
765 break; 743 break;
766 744
767 case 'P': 745 case 'P':
@@ -771,7 +749,7 @@ handle_exception (int sigval)
771 for each byte in the register (target byte order). P1f=11223344 means 749 for each byte in the register (target byte order). P1f=11223344 means
772 set register 31 to 44332211. 750 set register 31 to 44332211.
773 Success: OK 751 Success: OK
774 Failure: E02, E05 */ 752 Failure: E02, E05, E08 */
775 { 753 {
776 char *suffix; 754 char *suffix;
777 int regno = gdb_cris_strtol (&remcomInBuffer[1], &suffix, 16); 755 int regno = gdb_cris_strtol (&remcomInBuffer[1], &suffix, 16);
@@ -791,6 +769,10 @@ handle_exception (int sigval)
791 /* Do not support non-existing registers on the stack. */ 769 /* Do not support non-existing registers on the stack. */
792 gdb_cris_strcpy (remcomOutBuffer, error_message[E07]); 770 gdb_cris_strcpy (remcomOutBuffer, error_message[E07]);
793 break; 771 break;
772 case E08:
773 /* Invalid parameter. */
774 gdb_cris_strcpy (remcomOutBuffer, error_message[E08]);
775 break;
794 default: 776 default:
795 /* Valid register number. */ 777 /* Valid register number. */
796 gdb_cris_strcpy (remcomOutBuffer, "OK"); 778 gdb_cris_strcpy (remcomOutBuffer, "OK");
@@ -826,7 +808,7 @@ handle_exception (int sigval)
826 AA..AA is the start address, LLLL is the number of bytes, and 808 AA..AA is the start address, LLLL is the number of bytes, and
827 XX..XX is the hexadecimal data. 809 XX..XX is the hexadecimal data.
828 Success: OK 810 Success: OK
829 Failure: void. */ 811 Failure: E08. */
830 { 812 {
831 char *lenptr; 813 char *lenptr;
832 char *dataptr; 814 char *dataptr;
@@ -835,14 +817,15 @@ handle_exception (int sigval)
835 int length = gdb_cris_strtol(lenptr+1, &dataptr, 16); 817 int length = gdb_cris_strtol(lenptr+1, &dataptr, 16);
836 if (*lenptr == ',' && *dataptr == ':') { 818 if (*lenptr == ',' && *dataptr == ':') {
837 if (remcomInBuffer[0] == 'M') { 819 if (remcomInBuffer[0] == 'M') {
838 hex2mem(addr, dataptr + 1, length); 820 if (hex2bin(addr, dataptr + 1, length))
839 } 821 gdb_cris_strcpy (remcomOutBuffer, error_message[E08]);
840 else /* X */ { 822 else
823 gdb_cris_strcpy (remcomOutBuffer, "OK");
824 } else /* X */ {
841 bin2mem(addr, dataptr + 1, length); 825 bin2mem(addr, dataptr + 1, length);
826 gdb_cris_strcpy (remcomOutBuffer, "OK");
842 } 827 }
843 gdb_cris_strcpy (remcomOutBuffer, "OK"); 828 } else {
844 }
845 else {
846 gdb_cris_strcpy (remcomOutBuffer, error_message[E06]); 829 gdb_cris_strcpy (remcomOutBuffer, error_message[E06]);
847 } 830 }
848 } 831 }
@@ -970,7 +953,7 @@ asm ("\n"
970" move $ibr,[cris_reg+0x4E] ; P9,\n" 953" move $ibr,[cris_reg+0x4E] ; P9,\n"
971" move $irp,[cris_reg+0x52] ; P10,\n" 954" move $irp,[cris_reg+0x52] ; P10,\n"
972" move $srp,[cris_reg+0x56] ; P11,\n" 955" move $srp,[cris_reg+0x56] ; P11,\n"
973" move $dtp0,[cris_reg+0x5A] ; P12, register BAR, assembler might not know BAR\n" 956" move $bar,[cris_reg+0x5A] ; P12,\n"
974" ; P13, register DCCR already saved\n" 957" ; P13, register DCCR already saved\n"
975";; Due to the old assembler-versions BRP might not be recognized\n" 958";; Due to the old assembler-versions BRP might not be recognized\n"
976" .word 0xE670 ; move brp,r0\n" 959" .word 0xE670 ; move brp,r0\n"
@@ -1063,7 +1046,7 @@ asm ("\n"
1063" move $ibr,[cris_reg+0x4E] ; P9,\n" 1046" move $ibr,[cris_reg+0x4E] ; P9,\n"
1064" move $irp,[cris_reg+0x52] ; P10,\n" 1047" move $irp,[cris_reg+0x52] ; P10,\n"
1065" move $srp,[cris_reg+0x56] ; P11,\n" 1048" move $srp,[cris_reg+0x56] ; P11,\n"
1066" move $dtp0,[cris_reg+0x5A] ; P12, register BAR, assembler might not know BAR\n" 1049" move $bar,[cris_reg+0x5A] ; P12,\n"
1067" ; P13, register DCCR already saved\n" 1050" ; P13, register DCCR already saved\n"
1068";; Due to the old assembler-versions BRP might not be recognized\n" 1051";; Due to the old assembler-versions BRP might not be recognized\n"
1069" .word 0xE670 ; move brp,r0\n" 1052" .word 0xE670 ; move brp,r0\n"
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
index e7f8066105aa..85e3f1b1f3ac 100644
--- a/arch/cris/arch-v10/mm/init.c
+++ b/arch/cris/arch-v10/mm/init.c
@@ -68,14 +68,10 @@ paging_init(void)
68 68
69 *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) | /* bootrom */ 69 *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) | /* bootrom */
70 IO_STATE(R_MMU_KSEG, seg_e, page ) | 70 IO_STATE(R_MMU_KSEG, seg_e, page ) |
71 IO_STATE(R_MMU_KSEG, seg_d, page ) | 71 IO_STATE(R_MMU_KSEG, seg_d, page ) |
72 IO_STATE(R_MMU_KSEG, seg_c, page ) | 72 IO_STATE(R_MMU_KSEG, seg_c, page ) |
73 IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */ 73 IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */
74#ifdef CONFIG_JULIETTE
75 IO_STATE(R_MMU_KSEG, seg_a, seg ) | /* ARTPEC etc. */
76#else
77 IO_STATE(R_MMU_KSEG, seg_a, page ) | 74 IO_STATE(R_MMU_KSEG, seg_a, page ) |
78#endif
79 IO_STATE(R_MMU_KSEG, seg_9, seg ) | /* LED's on some boards */ 75 IO_STATE(R_MMU_KSEG, seg_9, seg ) | /* LED's on some boards */
80 IO_STATE(R_MMU_KSEG, seg_8, seg ) | /* CSE0/1, flash and I/O */ 76 IO_STATE(R_MMU_KSEG, seg_8, seg ) | /* CSE0/1, flash and I/O */
81 IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */ 77 IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */
@@ -92,14 +88,10 @@ paging_init(void)
92 IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) | 88 IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
93 IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) | 89 IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
94 IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) | 90 IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
95#ifdef CONFIG_JULIETTE
96 IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
97#else
98 IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) | 91 IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
99#endif
100 IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) | 92 IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
101 IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) ); 93 IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
102 94
103 *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) | 95 *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
104 IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) | 96 IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
105 IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) | 97 IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
diff --git a/arch/cris/arch-v32/Kconfig b/arch/cris/arch-v32/Kconfig
index 21bbd93be34f..17dbe03af5f4 100644
--- a/arch/cris/arch-v32/Kconfig
+++ b/arch/cris/arch-v32/Kconfig
@@ -11,95 +11,6 @@ config ETRAX_DRAM_VIRTUAL_BASE
11 default "c0000000" 11 default "c0000000"
12 12
13choice 13choice
14 prompt "Nbr of Ethernet LED groups"
15 depends on ETRAX_ARCH_V32
16 default ETRAX_NBR_LED_GRP_ONE
17 help
18	  Select how many Ethernet LED groups can be used. Usually one per Ethernet
19 interface is a good choice.
20
21config ETRAX_NBR_LED_GRP_ZERO
22 bool "Use zero LED groups"
23 help
24 Select this if you do not want any Ethernet LEDs.
25
26config ETRAX_NBR_LED_GRP_ONE
27 bool "Use one LED group"
28 help
29 Select this if you want one Ethernet LED group. This LED group
30 can be used for one or more Ethernet interfaces. However, it is
31 recommended that each Ethernet interface use a dedicated LED group.
32
33config ETRAX_NBR_LED_GRP_TWO
34 bool "Use two LED groups"
35 help
36 Select this if you want two Ethernet LED groups. This is the
37 best choice if you have more than one Ethernet interface and
38 would like to have separate LEDs for the interfaces.
39
40endchoice
41
42config ETRAX_LED_G_NET0
43 string "Ethernet LED group 0 green LED bit"
44 depends on ETRAX_ARCH_V32 && (ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO)
45 default "PA3"
46 help
47 Bit to use for the green LED in Ethernet LED group 0.
48
49config ETRAX_LED_R_NET0
50 string "Ethernet LED group 0 red LED bit"
51 depends on ETRAX_ARCH_V32 && (ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO)
52 default "PA4"
53 help
54 Bit to use for the red LED in Ethernet LED group 0.
55
56config ETRAX_LED_G_NET1
57 string "Ethernet group 1 green LED bit"
58 depends on ETRAX_ARCH_V32 && ETRAX_NBR_LED_GRP_TWO
59 default ""
60 help
61 Bit to use for the green LED in Ethernet LED group 1.
62
63config ETRAX_LED_R_NET1
64 string "Ethernet group 1 red LED bit"
65 depends on ETRAX_ARCH_V32 && ETRAX_NBR_LED_GRP_TWO
66 default ""
67 help
68 Bit to use for the red LED in Ethernet LED group 1.
69
70config ETRAX_V32_LED2G
71 string "Second green LED bit"
72 depends on ETRAX_ARCH_V32
73 default "PA5"
74 help
75 Bit to use for the first green LED (status LED).
76 Most Axis products use bit A5 here.
77
78config ETRAX_V32_LED2R
79 string "Second red LED bit"
80 depends on ETRAX_ARCH_V32
81 default "PA6"
82 help
83 Bit to use for the first red LED (network LED).
84 Most Axis products use bit A6 here.
85
86config ETRAX_V32_LED3G
87 string "Third green LED bit"
88 depends on ETRAX_ARCH_V32
89 default "PA7"
90 help
91 Bit to use for the first green LED (drive/power LED).
92 Most Axis products use bit A7 here.
93
94config ETRAX_V32_LED3R
95 string "Third red LED bit"
96 depends on ETRAX_ARCH_V32
97 default "PA7"
98 help
99 Bit to use for the first red LED (drive/power LED).
100 Most Axis products use bit A7 here.
101
102choice
103 prompt "Kernel GDB port" 14 prompt "Kernel GDB port"
104 depends on ETRAX_KGDB 15 depends on ETRAX_KGDB
105 default ETRAX_KGDB_PORT0 16 default ETRAX_KGDB_PORT0
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index e6c523cc40bc..2735eb7671a5 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -149,173 +149,6 @@ config ETRAX_NANDBOOT
149 Say Y if your boot code, kernel and root file system is in 149 Say Y if your boot code, kernel and root file system is in
150 NAND flash. Say N if they are in NOR flash. 150 NAND flash. Say N if they are in NOR flash.
151 151
152config ETRAX_I2C
153 bool "I2C driver"
154 depends on ETRAX_ARCH_V32
155 help
156 This option enables the I2C driver used by e.g. the RTC driver.
157
158config ETRAX_V32_I2C_DATA_PORT
159 string "I2C data pin"
160 depends on ETRAX_I2C
161 help
162 The pin to use for I2C data.
163
164config ETRAX_V32_I2C_CLK_PORT
165 string "I2C clock pin"
166 depends on ETRAX_I2C
167 help
168 The pin to use for I2C clock.
169
170config ETRAX_GPIO
171 bool "GPIO support"
172 depends on ETRAX_ARCH_V32
173 ---help---
174 Enables the ETRAX general port device (major 120, minors 0-4).
175 You can use this driver to access the general port bits. It supports
176 these ioctl's:
177 #include <linux/etraxgpio.h>
178 fd = open("/dev/gpioa", O_RDWR); // or /dev/gpiob
179 ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), bits_to_set);
180 ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), bits_to_clear);
181 err = ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &val);
182 Remember that you need to setup the port directions appropriately in
183 the General configuration.
184
185config ETRAX_VIRTUAL_GPIO
186 bool "Virtual GPIO support"
187 depends on ETRAX_GPIO
188 help
189 Enables the virtual Etrax general port device (major 120, minor 6).
190 It uses an I/O expander for the I2C-bus.
191
192config ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN
193 int "Virtual GPIO interrupt pin on PA pin"
194 range 0 7
195 depends on ETRAX_VIRTUAL_GPIO
196 help
197 The pin to use on PA for virtual gpio interrupt.
198
199config ETRAX_PA_CHANGEABLE_DIR
200 hex "PA user changeable dir mask"
201 depends on ETRAX_GPIO
202 default "0x00" if ETRAXFS
203 default "0x00000000" if !ETRAXFS
204 help
205 This is a bitmask with information of what bits in PA that a
206 user can change direction on using ioctl's.
207 Bit set = changeable.
208 You probably want 0 here, but it depends on your hardware.
209
210config ETRAX_PA_CHANGEABLE_BITS
211 hex "PA user changeable bits mask"
212 depends on ETRAX_GPIO
213 default "0x00" if ETRAXFS
214 default "0x00000000" if !ETRAXFS
215 help
216 This is a bitmask with information of what bits in PA
217 that a user can change the value on using ioctl's.
218 Bit set = changeable.
219
220config ETRAX_PB_CHANGEABLE_DIR
221 hex "PB user changeable dir mask"
222 depends on ETRAX_GPIO
223 default "0x00000" if ETRAXFS
224 default "0x00000000" if !ETRAXFS
225 help
226 This is a bitmask with information of what bits in PB
227 that a user can change direction on using ioctl's.
228 Bit set = changeable.
229 You probably want 0 here, but it depends on your hardware.
230
231config ETRAX_PB_CHANGEABLE_BITS
232 hex "PB user changeable bits mask"
233 depends on ETRAX_GPIO
234 default "0x00000" if ETRAXFS
235 default "0x00000000" if !ETRAXFS
236 help
237 This is a bitmask with information of what bits in PB
238 that a user can change the value on using ioctl's.
239 Bit set = changeable.
240
241config ETRAX_PC_CHANGEABLE_DIR
242 hex "PC user changeable dir mask"
243 depends on ETRAX_GPIO
244 default "0x00000" if ETRAXFS
245 default "0x00000000" if !ETRAXFS
246 help
247 This is a bitmask with information of what bits in PC
248 that a user can change direction on using ioctl's.
249 Bit set = changeable.
250 You probably want 0 here, but it depends on your hardware.
251
252config ETRAX_PC_CHANGEABLE_BITS
253 hex "PC user changeable bits mask"
254 depends on ETRAX_GPIO
255 default "0x00000" if ETRAXFS
256 default "0x00000000" if !ETRAXFS
257 help
258 This is a bitmask with information of what bits in PC
259 that a user can change the value on using ioctl's.
260 Bit set = changeable.
261
262config ETRAX_PD_CHANGEABLE_DIR
263 hex "PD user changeable dir mask"
264 depends on ETRAX_GPIO && ETRAXFS
265 default "0x00000"
266 help
267 This is a bitmask with information of what bits in PD
268 that a user can change direction on using ioctl's.
269 Bit set = changeable.
270 You probably want 0x00000 here, but it depends on your hardware.
271
272config ETRAX_PD_CHANGEABLE_BITS
273 hex "PD user changeable bits mask"
274 depends on ETRAX_GPIO && ETRAXFS
275 default "0x00000"
276 help
277 This is a bitmask (18 bits) with information of what bits in PD
278 that a user can change the value on using ioctl's.
279 Bit set = changeable.
280
281config ETRAX_PE_CHANGEABLE_DIR
282 hex "PE user changeable dir mask"
283 depends on ETRAX_GPIO && ETRAXFS
284 default "0x00000"
285 help
286 This is a bitmask (18 bits) with information of what bits in PE
287 that a user can change direction on using ioctl's.
288 Bit set = changeable.
289 You probably want 0x00000 here, but it depends on your hardware.
290
291config ETRAX_PE_CHANGEABLE_BITS
292 hex "PE user changeable bits mask"
293 depends on ETRAX_GPIO && ETRAXFS
294 default "0x00000"
295 help
296 This is a bitmask (18 bits) with information of what bits in PE
297 that a user can change the value on using ioctl's.
298 Bit set = changeable.
299
300config ETRAX_PV_CHANGEABLE_DIR
301 hex "PV user changeable dir mask"
302 depends on ETRAX_VIRTUAL_GPIO
303 default "0x0000"
304 help
305 This is a bitmask (16 bits) with information of what bits in PV
306 that a user can change direction on using ioctl's.
307 Bit set = changeable.
308 You probably want 0x0000 here, but it depends on your hardware.
309
310config ETRAX_PV_CHANGEABLE_BITS
311 hex "PV user changeable bits mask"
312 depends on ETRAX_VIRTUAL_GPIO
313 default "0x0000"
314 help
315 This is a bitmask (16 bits) with information of what bits in PV
316 that a user can change the value on using ioctl's.
317 Bit set = changeable.
318
319config ETRAX_CARDBUS 152config ETRAX_CARDBUS
320 bool "Cardbus support" 153 bool "Cardbus support"
321 depends on ETRAX_ARCH_V32 154 depends on ETRAX_ARCH_V32
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile
index 15fbfefced2c..b5a75fdce77b 100644
--- a/arch/cris/arch-v32/drivers/Makefile
+++ b/arch/cris/arch-v32/drivers/Makefile
@@ -7,6 +7,5 @@ obj-$(CONFIG_ETRAX_AXISFLASHMAP) += axisflashmap.o
7obj-$(CONFIG_ETRAXFS) += mach-fs/ 7obj-$(CONFIG_ETRAXFS) += mach-fs/
8obj-$(CONFIG_CRIS_MACH_ARTPEC3) += mach-a3/ 8obj-$(CONFIG_CRIS_MACH_ARTPEC3) += mach-a3/
9obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o 9obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o
10obj-$(CONFIG_ETRAX_I2C) += i2c.o
11obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o 10obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
12obj-$(CONFIG_PCI) += pci/ 11obj-$(CONFIG_PCI) += pci/
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 5387424683cc..c6309a182f46 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -361,7 +361,7 @@ static int __init init_axis_flash(void)
361 361
362#if 0 /* Dump flash memory so we can see what is going on */ 362#if 0 /* Dump flash memory so we can see what is going on */
363 if (main_mtd) { 363 if (main_mtd) {
364 int sectoraddr, i; 364 int sectoraddr;
365 for (sectoraddr = 0; sectoraddr < 2*65536+4096; 365 for (sectoraddr = 0; sectoraddr < 2*65536+4096;
366 sectoraddr += PAGESIZE) { 366 sectoraddr += PAGESIZE) {
367 main_mtd->read(main_mtd, sectoraddr, PAGESIZE, &len, 367 main_mtd->read(main_mtd, sectoraddr, PAGESIZE, &len,
@@ -369,21 +369,7 @@ static int __init init_axis_flash(void)
369 printk(KERN_INFO 369 printk(KERN_INFO
370 "Sector at %d (length %d):\n", 370 "Sector at %d (length %d):\n",
371 sectoraddr, len); 371 sectoraddr, len);
372 for (i = 0; i < PAGESIZE; i += 16) { 372 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, page, PAGESIZE, false);
373 printk(KERN_INFO
374 "%02x %02x %02x %02x "
375 "%02x %02x %02x %02x "
376 "%02x %02x %02x %02x "
377 "%02x %02x %02x %02x\n",
378 page[i] & 255, page[i+1] & 255,
379 page[i+2] & 255, page[i+3] & 255,
380 page[i+4] & 255, page[i+5] & 255,
381 page[i+6] & 255, page[i+7] & 255,
382 page[i+8] & 255, page[i+9] & 255,
383 page[i+10] & 255, page[i+11] & 255,
384 page[i+12] & 255, page[i+13] & 255,
385 page[i+14] & 255, page[i+15] & 255);
386 }
387 } 373 }
388 } 374 }
389#endif 375#endif
@@ -417,25 +403,11 @@ static int __init init_axis_flash(void)
417 403
418#if 0 /* Dump partition table so we can see what is going on */ 404#if 0 /* Dump partition table so we can see what is going on */
419 printk(KERN_INFO 405 printk(KERN_INFO
420 "axisflashmap: flash read %d bytes at 0x%08x, data: " 406 "axisflashmap: flash read %d bytes at 0x%08x, data: %8ph\n",
421 "%02x %02x %02x %02x %02x %02x %02x %02x\n", 407 len, CONFIG_ETRAX_PTABLE_SECTOR, page);
422 len, CONFIG_ETRAX_PTABLE_SECTOR,
423 page[0] & 255, page[1] & 255,
424 page[2] & 255, page[3] & 255,
425 page[4] & 255, page[5] & 255,
426 page[6] & 255, page[7] & 255);
427 printk(KERN_INFO 408 printk(KERN_INFO
428 "axisflashmap: partition table offset %d, data: " 409 "axisflashmap: partition table offset %d, data: %8ph\n",
429 "%02x %02x %02x %02x %02x %02x %02x %02x\n", 410 PARTITION_TABLE_OFFSET, page + PARTITION_TABLE_OFFSET);
430 PARTITION_TABLE_OFFSET,
431 page[PARTITION_TABLE_OFFSET+0] & 255,
432 page[PARTITION_TABLE_OFFSET+1] & 255,
433 page[PARTITION_TABLE_OFFSET+2] & 255,
434 page[PARTITION_TABLE_OFFSET+3] & 255,
435 page[PARTITION_TABLE_OFFSET+4] & 255,
436 page[PARTITION_TABLE_OFFSET+5] & 255,
437 page[PARTITION_TABLE_OFFSET+6] & 255,
438 page[PARTITION_TABLE_OFFSET+7] & 255);
439#endif 411#endif
440 } 412 }
441 413
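Both debug dumps in axisflashmap.c collapse onto standard helpers: print_hex_dump() for the whole page and the %*ph printk extension for the short 8-byte snippets. A minimal sketch of the two idioms (the buffer names are placeholders):

#include <linux/printk.h>

static void dump_examples(const unsigned char *page, size_t page_size)
{
	/* 16 bytes per line, byte-wise grouping, no address prefix. */
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
		       page, page_size, false);

	/* First 8 bytes inline, rendered as "aa bb cc ...". */
	printk(KERN_INFO "partition table data: %8ph\n", page);
}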
diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c
deleted file mode 100644
index 3b2c82ce8147..000000000000
--- a/arch/cris/arch-v32/drivers/i2c.c
+++ /dev/null
@@ -1,751 +0,0 @@
1/*!***************************************************************************
2*!
3*! FILE NAME : i2c.c
4*!
5*! DESCRIPTION: implements an interface for IIC/I2C, both directly from other
6*! kernel modules (i2c_writereg/readreg) and from userspace using
7*! ioctl()'s
8*!
9*! Nov 30 1998 Torbjorn Eliasson Initial version.
10*! Bjorn Wesen Elinux kernel version.
11*! Jan 14 2000 Johan Adolfsson Fixed PB shadow register stuff -
12*! don't use PB_I2C if DS1302 uses same bits,
13*! use PB.
14*| June 23 2003 Pieter Grimmerink Added 'i2c_sendnack'. i2c_readreg now
15*| generates nack on last received byte,
16*| instead of ack.
17*| i2c_getack changed data level while clock
18*| was high, causing DS75 to see a stop condition
19*!
20*! ---------------------------------------------------------------------------
21*!
22*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
23*!
24*!***************************************************************************/
25
26/****************** INCLUDE FILES SECTION ***********************************/
27
28#include <linux/module.h>
29#include <linux/sched.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/fs.h>
33#include <linux/string.h>
34#include <linux/init.h>
35#include <linux/mutex.h>
36
37#include <asm/etraxi2c.h>
38
39#include <asm/io.h>
40#include <asm/delay.h>
41
42#include "i2c.h"
43
44/****************** I2C DEFINITION SECTION *************************/
45
46#define D(x)
47
48#define I2C_MAJOR 123 /* LOCAL/EXPERIMENTAL */
49static DEFINE_MUTEX(i2c_mutex);
50static const char i2c_name[] = "i2c";
51
52#define CLOCK_LOW_TIME 8
53#define CLOCK_HIGH_TIME 8
54#define START_CONDITION_HOLD_TIME 8
55#define STOP_CONDITION_HOLD_TIME 8
56#define ENABLE_OUTPUT 0x01
57#define ENABLE_INPUT 0x00
58#define I2C_CLOCK_HIGH 1
59#define I2C_CLOCK_LOW 0
60#define I2C_DATA_HIGH 1
61#define I2C_DATA_LOW 0
62
63#define i2c_enable()
64#define i2c_disable()
65
66/* enable or disable output-enable, to select output or input on the i2c bus */
67
68#define i2c_dir_out() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_out)
69#define i2c_dir_in() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_in)
70
71/* control the i2c clock and data signals */
72
73#define i2c_clk(x) crisv32_io_set(&cris_i2c_clk, x)
74#define i2c_data(x) crisv32_io_set(&cris_i2c_data, x)
75
76/* read a bit from the i2c interface */
77
78#define i2c_getbit() crisv32_io_rd(&cris_i2c_data)
79
80#define i2c_delay(usecs) udelay(usecs)
81
82static DEFINE_SPINLOCK(i2c_lock); /* Protect directions etc */
83
84/****************** VARIABLE SECTION ************************************/
85
86static struct crisv32_iopin cris_i2c_clk;
87static struct crisv32_iopin cris_i2c_data;
88
89/****************** FUNCTION DEFINITION SECTION *************************/
90
91
92/* generate i2c start condition */
93
94void
95i2c_start(void)
96{
97 /*
98 * SCL=1 SDA=1
99 */
100 i2c_dir_out();
101 i2c_delay(CLOCK_HIGH_TIME/6);
102 i2c_data(I2C_DATA_HIGH);
103 i2c_clk(I2C_CLOCK_HIGH);
104 i2c_delay(CLOCK_HIGH_TIME);
105 /*
106 * SCL=1 SDA=0
107 */
108 i2c_data(I2C_DATA_LOW);
109 i2c_delay(START_CONDITION_HOLD_TIME);
110 /*
111 * SCL=0 SDA=0
112 */
113 i2c_clk(I2C_CLOCK_LOW);
114 i2c_delay(CLOCK_LOW_TIME);
115}
116
117/* generate i2c stop condition */
118
119void
120i2c_stop(void)
121{
122 i2c_dir_out();
123
124 /*
125 * SCL=0 SDA=0
126 */
127 i2c_clk(I2C_CLOCK_LOW);
128 i2c_data(I2C_DATA_LOW);
129 i2c_delay(CLOCK_LOW_TIME*2);
130 /*
131 * SCL=1 SDA=0
132 */
133 i2c_clk(I2C_CLOCK_HIGH);
134 i2c_delay(CLOCK_HIGH_TIME*2);
135 /*
136 * SCL=1 SDA=1
137 */
138 i2c_data(I2C_DATA_HIGH);
139 i2c_delay(STOP_CONDITION_HOLD_TIME);
140
141 i2c_dir_in();
142}
143
144/* write a byte to the i2c interface */
145
146void
147i2c_outbyte(unsigned char x)
148{
149 int i;
150
151 i2c_dir_out();
152
153 for (i = 0; i < 8; i++) {
154 if (x & 0x80) {
155 i2c_data(I2C_DATA_HIGH);
156 } else {
157 i2c_data(I2C_DATA_LOW);
158 }
159
160 i2c_delay(CLOCK_LOW_TIME/2);
161 i2c_clk(I2C_CLOCK_HIGH);
162 i2c_delay(CLOCK_HIGH_TIME);
163 i2c_clk(I2C_CLOCK_LOW);
164 i2c_delay(CLOCK_LOW_TIME/2);
165 x <<= 1;
166 }
167 i2c_data(I2C_DATA_LOW);
168 i2c_delay(CLOCK_LOW_TIME/2);
169
170 /*
171 * enable input
172 */
173 i2c_dir_in();
174}
175
176/* read a byte from the i2c interface */
177
178unsigned char
179i2c_inbyte(void)
180{
181 unsigned char aBitByte = 0;
182 int i;
183
184 /* Switch off I2C to get bit */
185 i2c_disable();
186 i2c_dir_in();
187 i2c_delay(CLOCK_HIGH_TIME/2);
188
189 /* Get bit */
190 aBitByte |= i2c_getbit();
191
192 /* Enable I2C */
193 i2c_enable();
194 i2c_delay(CLOCK_LOW_TIME/2);
195
196 for (i = 1; i < 8; i++) {
197 aBitByte <<= 1;
198 /* Clock pulse */
199 i2c_clk(I2C_CLOCK_HIGH);
200 i2c_delay(CLOCK_HIGH_TIME);
201 i2c_clk(I2C_CLOCK_LOW);
202 i2c_delay(CLOCK_LOW_TIME);
203
204 /* Switch off I2C to get bit */
205 i2c_disable();
206 i2c_dir_in();
207 i2c_delay(CLOCK_HIGH_TIME/2);
208
209 /* Get bit */
210 aBitByte |= i2c_getbit();
211
212 /* Enable I2C */
213 i2c_enable();
214 i2c_delay(CLOCK_LOW_TIME/2);
215 }
216 i2c_clk(I2C_CLOCK_HIGH);
217 i2c_delay(CLOCK_HIGH_TIME);
218
219 /*
220 * we leave the clock low, getbyte is usually followed
221 * by sendack/nack, they assume the clock to be low
222 */
223 i2c_clk(I2C_CLOCK_LOW);
224 return aBitByte;
225}
226
227/*#---------------------------------------------------------------------------
228*#
229*# FUNCTION NAME: i2c_getack
230*#
231*# DESCRIPTION : checks if ack was received from i2c
232*#
233*#--------------------------------------------------------------------------*/
234
235int
236i2c_getack(void)
237{
238 int ack = 1;
239 /*
240 * enable output
241 */
242 i2c_dir_out();
243 /*
244 * Release data bus by setting
245 * data high
246 */
247 i2c_data(I2C_DATA_HIGH);
248 /*
249 * enable input
250 */
251 i2c_dir_in();
252 i2c_delay(CLOCK_HIGH_TIME/4);
253 /*
254 * generate ACK clock pulse
255 */
256 i2c_clk(I2C_CLOCK_HIGH);
257#if 0
258 /*
259 * Use PORT PB instead of I2C
260 * for input. (I2C not working)
261 */
262 i2c_clk(1);
263 i2c_data(1);
264 /*
265 * switch off I2C
266 */
267 i2c_data(1);
268 i2c_disable();
269 i2c_dir_in();
270#endif
271
272 /*
273 * now wait for ack
274 */
275 i2c_delay(CLOCK_HIGH_TIME/2);
276 /*
277 * check for ack
278 */
279 if (i2c_getbit())
280 ack = 0;
281 i2c_delay(CLOCK_HIGH_TIME/2);
282 if (!ack) {
283		if (!i2c_getbit()) /* receiver pulled SDA low */
284 ack = 1;
285 i2c_delay(CLOCK_HIGH_TIME/2);
286 }
287
288 /*
289 * our clock is high now, make sure data is low
290 * before we enable our output. If we keep data high
291 * and enable output, we would generate a stop condition.
292 */
293#if 0
294 i2c_data(I2C_DATA_LOW);
295
296 /*
297 * end clock pulse
298 */
299 i2c_enable();
300 i2c_dir_out();
301#endif
302 i2c_clk(I2C_CLOCK_LOW);
303 i2c_delay(CLOCK_HIGH_TIME/4);
304 /*
305 * enable output
306 */
307 i2c_dir_out();
308 /*
309 * remove ACK clock pulse
310 */
311 i2c_data(I2C_DATA_HIGH);
312 i2c_delay(CLOCK_LOW_TIME/2);
313 return ack;
314}
315
316/*#---------------------------------------------------------------------------
317*#
318*# FUNCTION NAME: I2C::sendAck
319*#
320*# DESCRIPTION : Send ACK on received data
321*#
322*#--------------------------------------------------------------------------*/
323void
324i2c_sendack(void)
325{
326 /*
327 * enable output
328 */
329 i2c_delay(CLOCK_LOW_TIME);
330 i2c_dir_out();
331 /*
332 * set ack pulse high
333 */
334 i2c_data(I2C_DATA_LOW);
335 /*
336 * generate clock pulse
337 */
338 i2c_delay(CLOCK_HIGH_TIME/6);
339 i2c_clk(I2C_CLOCK_HIGH);
340 i2c_delay(CLOCK_HIGH_TIME);
341 i2c_clk(I2C_CLOCK_LOW);
342 i2c_delay(CLOCK_LOW_TIME/6);
343 /*
344 * reset data out
345 */
346 i2c_data(I2C_DATA_HIGH);
347 i2c_delay(CLOCK_LOW_TIME);
348
349 i2c_dir_in();
350}
351
352/*#---------------------------------------------------------------------------
353*#
354*# FUNCTION NAME: i2c_sendnack
355*#
356*# DESCRIPTION : Sends NACK on received data
357*#
358*#--------------------------------------------------------------------------*/
359void
360i2c_sendnack(void)
361{
362 /*
363 * enable output
364 */
365 i2c_delay(CLOCK_LOW_TIME);
366 i2c_dir_out();
367 /*
368 * set data high
369 */
370 i2c_data(I2C_DATA_HIGH);
371 /*
372 * generate clock pulse
373 */
374 i2c_delay(CLOCK_HIGH_TIME/6);
375 i2c_clk(I2C_CLOCK_HIGH);
376 i2c_delay(CLOCK_HIGH_TIME);
377 i2c_clk(I2C_CLOCK_LOW);
378 i2c_delay(CLOCK_LOW_TIME);
379
380 i2c_dir_in();
381}
382
383/*#---------------------------------------------------------------------------
384*#
385*# FUNCTION NAME: i2c_write
386*#
387*# DESCRIPTION : Writes a value to an I2C device
388*#
389*#--------------------------------------------------------------------------*/
390int
391i2c_write(unsigned char theSlave, void *data, size_t nbytes)
392{
393 int error, cntr = 3;
394 unsigned char bytes_wrote = 0;
395 unsigned char value;
396 unsigned long flags;
397
398 spin_lock_irqsave(&i2c_lock, flags);
399
400 do {
401 error = 0;
402
403 i2c_start();
404 /*
405 * send slave address
406 */
407 i2c_outbyte((theSlave & 0xfe));
408 /*
409 * wait for ack
410 */
411 if (!i2c_getack())
412 error = 1;
413 /*
414 * send data
415 */
416 for (bytes_wrote = 0; bytes_wrote < nbytes; bytes_wrote++) {
417 memcpy(&value, data + bytes_wrote, sizeof value);
418 i2c_outbyte(value);
419 /*
420 * now it's time to wait for ack
421 */
422 if (!i2c_getack())
423 error |= 4;
424 }
425 /*
426 * end byte stream
427 */
428 i2c_stop();
429
430 } while (error && cntr--);
431
432 i2c_delay(CLOCK_LOW_TIME);
433
434 spin_unlock_irqrestore(&i2c_lock, flags);
435
436 return -error;
437}
438
439/*#---------------------------------------------------------------------------
440*#
441*# FUNCTION NAME: i2c_read
442*#
443*# DESCRIPTION : Reads a value from an I2C device
444*#
445*#--------------------------------------------------------------------------*/
446int
447i2c_read(unsigned char theSlave, void *data, size_t nbytes)
448{
449 unsigned char b = 0;
450 unsigned char bytes_read = 0;
451 int error, cntr = 3;
452 unsigned long flags;
453
454 spin_lock_irqsave(&i2c_lock, flags);
455
456 do {
457 error = 0;
458 memset(data, 0, nbytes);
459 /*
460 * generate start condition
461 */
462 i2c_start();
463 /*
464 * send slave address
465 */
466 i2c_outbyte((theSlave | 0x01));
467 /*
468 * wait for ack
469 */
470 if (!i2c_getack())
471 error = 1;
472 /*
473 * fetch data
474 */
475 for (bytes_read = 0; bytes_read < nbytes; bytes_read++) {
476 b = i2c_inbyte();
477 memcpy(data + bytes_read, &b, sizeof b);
478
479 if (bytes_read < (nbytes - 1))
480 i2c_sendack();
481 }
482 /*
483 * last received byte needs to be nacked
484 * instead of acked
485 */
486 i2c_sendnack();
487 /*
488 * end sequence
489 */
490 i2c_stop();
491 } while (error && cntr--);
492
493 spin_unlock_irqrestore(&i2c_lock, flags);
494
495 return -error;
496}
497
498/*#---------------------------------------------------------------------------
499*#
500*# FUNCTION NAME: i2c_writereg
501*#
502*# DESCRIPTION : Writes a value to an I2C device
503*#
504*#--------------------------------------------------------------------------*/
505int
506i2c_writereg(unsigned char theSlave, unsigned char theReg,
507 unsigned char theValue)
508{
509 int error, cntr = 3;
510 unsigned long flags;
511
512 spin_lock_irqsave(&i2c_lock, flags);
513
514 do {
515 error = 0;
516
517 i2c_start();
518 /*
519 * send slave address
520 */
521 i2c_outbyte((theSlave & 0xfe));
522 /*
523 * wait for ack
524 */
525 if(!i2c_getack())
526 error = 1;
527 /*
528 * now select register
529 */
530 i2c_dir_out();
531 i2c_outbyte(theReg);
532 /*
533 * now it's time to wait for ack
534 */
535 if(!i2c_getack())
536 error |= 2;
537 /*
538	 * send register data
539 */
540 i2c_outbyte(theValue);
541 /*
542 * now it's time to wait for ack
543 */
544 if(!i2c_getack())
545 error |= 4;
546 /*
547 * end byte stream
548 */
549 i2c_stop();
550 } while(error && cntr--);
551
552 i2c_delay(CLOCK_LOW_TIME);
553
554 spin_unlock_irqrestore(&i2c_lock, flags);
555
556 return -error;
557}
558
559/*#---------------------------------------------------------------------------
560*#
561*# FUNCTION NAME: i2c_readreg
562*#
563*# DESCRIPTION : Reads a value from the decoder registers.
564*#
565*#--------------------------------------------------------------------------*/
566unsigned char
567i2c_readreg(unsigned char theSlave, unsigned char theReg)
568{
569 unsigned char b = 0;
570 int error, cntr = 3;
571 unsigned long flags;
572
573 spin_lock_irqsave(&i2c_lock, flags);
574
575 do {
576 error = 0;
577 /*
578 * generate start condition
579 */
580 i2c_start();
581
582 /*
583 * send slave address
584 */
585 i2c_outbyte((theSlave & 0xfe));
586 /*
587 * wait for ack
588 */
589 if(!i2c_getack())
590 error = 1;
591 /*
592 * now select register
593 */
594 i2c_dir_out();
595 i2c_outbyte(theReg);
596 /*
597 * now it's time to wait for ack
598 */
599 if(!i2c_getack())
600 error |= 2;
601 /*
602 * repeat start condition
603 */
604 i2c_delay(CLOCK_LOW_TIME);
605 i2c_start();
606 /*
607 * send slave address
608 */
609 i2c_outbyte(theSlave | 0x01);
610 /*
611 * wait for ack
612 */
613 if(!i2c_getack())
614 error |= 4;
615 /*
616 * fetch register
617 */
618 b = i2c_inbyte();
619 /*
620 * last received byte needs to be nacked
621 * instead of acked
622 */
623 i2c_sendnack();
624 /*
625 * end sequence
626 */
627 i2c_stop();
628
629 } while(error && cntr--);
630
631 spin_unlock_irqrestore(&i2c_lock, flags);
632
633 return b;
634}
635
636static int
637i2c_open(struct inode *inode, struct file *filp)
638{
639 return 0;
640}
641
642static int
643i2c_release(struct inode *inode, struct file *filp)
644{
645 return 0;
646}
647
648/* Main device API. ioctl's to write or read to/from i2c registers.
649 */
650
651static long
652i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
653{
654 int ret;
655 if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) {
656 return -ENOTTY;
657 }
658
659 switch (_IOC_NR(cmd)) {
660 case I2C_WRITEREG:
661 /* write to an i2c slave */
662 D(printk("i2cw %d %d %d\n",
663 I2C_ARGSLAVE(arg),
664 I2C_ARGREG(arg),
665 I2C_ARGVALUE(arg)));
666
667 mutex_lock(&i2c_mutex);
668 ret = i2c_writereg(I2C_ARGSLAVE(arg),
669 I2C_ARGREG(arg),
670 I2C_ARGVALUE(arg));
671 mutex_unlock(&i2c_mutex);
672 return ret;
673
674 case I2C_READREG:
675 {
676 unsigned char val;
677 /* read from an i2c slave */
678 D(printk("i2cr %d %d ",
679 I2C_ARGSLAVE(arg),
680 I2C_ARGREG(arg)));
681 mutex_lock(&i2c_mutex);
682 val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg));
683 mutex_unlock(&i2c_mutex);
684 D(printk("= %d\n", val));
685 return val;
686 }
687 default:
688 return -EINVAL;
689
690 }
691
692 return 0;
693}
694
695static const struct file_operations i2c_fops = {
696 .owner = THIS_MODULE,
697 .unlocked_ioctl = i2c_ioctl,
698 .open = i2c_open,
699 .release = i2c_release,
700 .llseek = noop_llseek,
701};
702
703static int __init i2c_init(void)
704{
705 static int res;
706 static int first = 1;
707
708 if (!first)
709 return res;
710
711 first = 0;
712
713 /* Setup and enable the DATA and CLK pins */
714
715 res = crisv32_io_get_name(&cris_i2c_data,
716 CONFIG_ETRAX_V32_I2C_DATA_PORT);
717 if (res < 0)
718 return res;
719
720 res = crisv32_io_get_name(&cris_i2c_clk, CONFIG_ETRAX_V32_I2C_CLK_PORT);
721 crisv32_io_set_dir(&cris_i2c_clk, crisv32_io_dir_out);
722
723 return res;
724}
725
726
727static int __init i2c_register(void)
728{
729 int res;
730
731 res = i2c_init();
732 if (res < 0)
733 return res;
734
735 /* register char device */
736
737 res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops);
738 if (res < 0) {
739 printk(KERN_ERR "i2c: couldn't get a major number.\n");
740 return res;
741 }
742
743 printk(KERN_INFO
744 "I2C driver v2.2, (c) 1999-2007 Axis Communications AB\n");
745
746 return 0;
747}
748/* this makes sure that i2c_init is called during boot */
749module_init(i2c_register);
750
751/****************** END OF FILE i2c.c ********************************/
diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h
deleted file mode 100644
index d9cc856f89fb..000000000000
--- a/arch/cris/arch-v32/drivers/i2c.h
+++ /dev/null
@@ -1,16 +0,0 @@
1
2#include <linux/init.h>
3
4/* High level I2C actions */
5int i2c_write(unsigned char theSlave, void *data, size_t nbytes);
6int i2c_read(unsigned char theSlave, void *data, size_t nbytes);
7int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
8unsigned char i2c_readreg(unsigned char theSlave, unsigned char theReg);
9
10/* Low level I2C */
11void i2c_start(void);
12void i2c_stop(void);
13void i2c_outbyte(unsigned char x);
14unsigned char i2c_inbyte(void);
15int i2c_getack(void);
16void i2c_sendack(void);
diff --git a/arch/cris/arch-v32/drivers/mach-a3/Makefile b/arch/cris/arch-v32/drivers/mach-a3/Makefile
index 5c6d2a2a080e..59028d0b981c 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/Makefile
+++ b/arch/cris/arch-v32/drivers/mach-a3/Makefile
@@ -3,4 +3,3 @@
3# 3#
4 4
5obj-$(CONFIG_ETRAX_NANDFLASH) += nandflash.o 5obj-$(CONFIG_ETRAX_NANDFLASH) += nandflash.o
6obj-$(CONFIG_ETRAX_GPIO) += gpio.o
diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c
deleted file mode 100644
index c92e1da3684d..000000000000
--- a/arch/cris/arch-v32/drivers/mach-a3/gpio.c
+++ /dev/null
@@ -1,999 +0,0 @@
1/*
2 * Artec-3 general port I/O device
3 *
4 * Copyright (c) 2007 Axis Communications AB
5 *
6 * Authors: Bjorn Wesen (initial version)
7 * Ola Knutsson (LED handling)
8 * Johan Adolfsson (read/set directions, write, port G,
9 * port to ETRAX FS.
10 * Ricard Wanderlof (PWM for Artpec-3)
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/ioport.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/fs.h>
21#include <linux/string.h>
22#include <linux/poll.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/mutex.h>
27
28#include <asm/etraxgpio.h>
29#include <hwregs/reg_map.h>
30#include <hwregs/reg_rdwr.h>
31#include <hwregs/gio_defs.h>
32#include <hwregs/intr_vect_defs.h>
33#include <asm/io.h>
34#include <asm/irq.h>
35#include <mach/pinmux.h>
36
37#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
38#include "../i2c.h"
39
40#define VIRT_I2C_ADDR 0x40
41#endif
42
43/* The following gio ports on ARTPEC-3 are available:
44 * pa 32 bits
45 * pb 32 bits
46 * pc 16 bits
47 * each port has a rw_px_dout, r_px_din and rw_px_oe register.
48 */
49
50#define GPIO_MAJOR 120 /* experimental MAJOR number */
51
52#define I2C_INTERRUPT_BITS 0x300 /* i2c0_done and i2c1_done bits */
53
54#define D(x)
55
56#if 0
57static int dp_cnt;
58#define DP(x) \
59 do { \
60 dp_cnt++; \
61 if (dp_cnt % 1000 == 0) \
62 x; \
63 } while (0)
64#else
65#define DP(x)
66#endif
67
68static DEFINE_MUTEX(gpio_mutex);
69static char gpio_name[] = "etrax gpio";
70
71#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
72static int virtual_gpio_ioctl(struct file *file, unsigned int cmd,
73 unsigned long arg);
74#endif
75static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
76static ssize_t gpio_write(struct file *file, const char __user *buf,
77 size_t count, loff_t *off);
78static int gpio_open(struct inode *inode, struct file *filp);
79static int gpio_release(struct inode *inode, struct file *filp);
80static unsigned int gpio_poll(struct file *filp,
81 struct poll_table_struct *wait);
82
83/* private data per open() of this driver */
84
85struct gpio_private {
86 struct gpio_private *next;
87 /* The IO_CFG_WRITE_MODE_VALUE only support 8 bits: */
88 unsigned char clk_mask;
89 unsigned char data_mask;
90 unsigned char write_msb;
91 unsigned char pad1;
92 /* These fields are generic */
93 unsigned long highalarm, lowalarm;
94 wait_queue_head_t alarm_wq;
95 int minor;
96};
97
98static void gpio_set_alarm(struct gpio_private *priv);
99static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg);
100static int gpio_pwm_ioctl(struct gpio_private *priv, unsigned int cmd,
101 unsigned long arg);
102
103
104/* linked list of alarms to check for */
105
106static struct gpio_private *alarmlist;
107
108static int wanted_interrupts;
109
110static DEFINE_SPINLOCK(gpio_lock);
111
112#define NUM_PORTS (GPIO_MINOR_LAST+1)
113#define GIO_REG_RD_ADDR(reg) \
114 (unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg)
115#define GIO_REG_WR_ADDR(reg) \
116 (unsigned long *)(regi_gio + REG_WR_ADDR_gio_##reg)
117static unsigned long led_dummy;
118static unsigned long port_d_dummy; /* Only input on Artpec-3 */
119#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
120static unsigned long port_e_dummy; /* Non existent on Artpec-3 */
121static unsigned long virtual_dummy;
122static unsigned long virtual_rw_pv_oe = CONFIG_ETRAX_DEF_GIO_PV_OE;
123static unsigned short cached_virtual_gpio_read;
124#endif
125
126static unsigned long *data_out[NUM_PORTS] = {
127 GIO_REG_WR_ADDR(rw_pa_dout),
128 GIO_REG_WR_ADDR(rw_pb_dout),
129 &led_dummy,
130 GIO_REG_WR_ADDR(rw_pc_dout),
131 &port_d_dummy,
132#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
133 &port_e_dummy,
134 &virtual_dummy,
135#endif
136};
137
138static unsigned long *data_in[NUM_PORTS] = {
139 GIO_REG_RD_ADDR(r_pa_din),
140 GIO_REG_RD_ADDR(r_pb_din),
141 &led_dummy,
142 GIO_REG_RD_ADDR(r_pc_din),
143 GIO_REG_RD_ADDR(r_pd_din),
144#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
145 &port_e_dummy,
146 &virtual_dummy,
147#endif
148};
149
150static unsigned long changeable_dir[NUM_PORTS] = {
151 CONFIG_ETRAX_PA_CHANGEABLE_DIR,
152 CONFIG_ETRAX_PB_CHANGEABLE_DIR,
153 0,
154 CONFIG_ETRAX_PC_CHANGEABLE_DIR,
155 0,
156#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
157 0,
158 CONFIG_ETRAX_PV_CHANGEABLE_DIR,
159#endif
160};
161
162static unsigned long changeable_bits[NUM_PORTS] = {
163 CONFIG_ETRAX_PA_CHANGEABLE_BITS,
164 CONFIG_ETRAX_PB_CHANGEABLE_BITS,
165 0,
166 CONFIG_ETRAX_PC_CHANGEABLE_BITS,
167 0,
168#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
169 0,
170 CONFIG_ETRAX_PV_CHANGEABLE_BITS,
171#endif
172};
173
174static unsigned long *dir_oe[NUM_PORTS] = {
175 GIO_REG_WR_ADDR(rw_pa_oe),
176 GIO_REG_WR_ADDR(rw_pb_oe),
177 &led_dummy,
178 GIO_REG_WR_ADDR(rw_pc_oe),
179 &port_d_dummy,
180#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
181 &port_e_dummy,
182 &virtual_rw_pv_oe,
183#endif
184};
185
186static void gpio_set_alarm(struct gpio_private *priv)
187{
188 int bit;
189 int intr_cfg;
190 int mask;
191 int pins;
192 unsigned long flags;
193
194 spin_lock_irqsave(&gpio_lock, flags);
195 intr_cfg = REG_RD_INT(gio, regi_gio, rw_intr_cfg);
196 pins = REG_RD_INT(gio, regi_gio, rw_intr_pins);
197 mask = REG_RD_INT(gio, regi_gio, rw_intr_mask) & I2C_INTERRUPT_BITS;
198
199 for (bit = 0; bit < 32; bit++) {
200 int intr = bit % 8;
201 int pin = bit / 8;
202 if (priv->minor < GPIO_MINOR_LEDS)
203 pin += priv->minor * 4;
204 else
205 pin += (priv->minor - 1) * 4;
206
207 if (priv->highalarm & (1<<bit)) {
208 intr_cfg |= (regk_gio_hi << (intr * 3));
209 mask |= 1 << intr;
210 wanted_interrupts = mask & 0xff;
211 pins |= pin << (intr * 4);
212 } else if (priv->lowalarm & (1<<bit)) {
213 intr_cfg |= (regk_gio_lo << (intr * 3));
214 mask |= 1 << intr;
215 wanted_interrupts = mask & 0xff;
216 pins |= pin << (intr * 4);
217 }
218 }
219
220 REG_WR_INT(gio, regi_gio, rw_intr_cfg, intr_cfg);
221 REG_WR_INT(gio, regi_gio, rw_intr_pins, pins);
222 REG_WR_INT(gio, regi_gio, rw_intr_mask, mask);
223
224 spin_unlock_irqrestore(&gpio_lock, flags);
225}
226
227static unsigned int gpio_poll(struct file *file, struct poll_table_struct *wait)
228{
229 unsigned int mask = 0;
230 struct gpio_private *priv = file->private_data;
231 unsigned long data;
232 unsigned long tmp;
233
234 if (priv->minor >= GPIO_MINOR_PWM0 &&
235 priv->minor <= GPIO_MINOR_LAST_PWM)
236 return 0;
237
238 poll_wait(file, &priv->alarm_wq, wait);
239 if (priv->minor <= GPIO_MINOR_D) {
240 data = readl(data_in[priv->minor]);
241 REG_WR_INT(gio, regi_gio, rw_ack_intr, wanted_interrupts);
242 tmp = REG_RD_INT(gio, regi_gio, rw_intr_mask);
243 tmp &= I2C_INTERRUPT_BITS;
244 tmp |= wanted_interrupts;
245 REG_WR_INT(gio, regi_gio, rw_intr_mask, tmp);
246 } else
247 return 0;
248
249 if ((data & priv->highalarm) || (~data & priv->lowalarm))
250 mask = POLLIN|POLLRDNORM;
251
252 DP(printk(KERN_DEBUG "gpio_poll ready: mask 0x%08X\n", mask));
253 return mask;
254}
255
256static irqreturn_t gpio_interrupt(int irq, void *dev_id)
257{
258 reg_gio_rw_intr_mask intr_mask;
259 reg_gio_r_masked_intr masked_intr;
260 reg_gio_rw_ack_intr ack_intr;
261 unsigned long flags;
262 unsigned long tmp;
263 unsigned long tmp2;
264#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
265 unsigned char enable_gpiov_ack = 0;
266#endif
267
268 /* Find what PA interrupts are active */
269 masked_intr = REG_RD(gio, regi_gio, r_masked_intr);
270 tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr);
271
272 /* Find those that we have enabled */
273 spin_lock_irqsave(&gpio_lock, flags);
274 tmp &= wanted_interrupts;
275 spin_unlock_irqrestore(&gpio_lock, flags);
276
277#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
278 /* Something changed on virtual GPIO. Interrupt is acked by
279 * reading the device.
280 */
281 if (tmp & (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN)) {
282 i2c_read(VIRT_I2C_ADDR, (void *)&cached_virtual_gpio_read,
283 sizeof(cached_virtual_gpio_read));
284 enable_gpiov_ack = 1;
285 }
286#endif
287
288 /* Ack them */
289 ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp);
290 REG_WR(gio, regi_gio, rw_ack_intr, ack_intr);
291
292 /* Disable those interrupts.. */
293 intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
294 tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask);
295 tmp2 &= ~tmp;
296#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
297 /* Do not disable interrupt on virtual GPIO. Changes on virtual
298 * pins are only noticed by an interrupt.
299 */
300 if (enable_gpiov_ack)
301 tmp2 |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
302#endif
303 intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2);
304 REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
305
306 return IRQ_RETVAL(tmp);
307}
308
309static void gpio_write_bit(unsigned long *port, unsigned char data, int bit,
310 unsigned char clk_mask, unsigned char data_mask)
311{
312 unsigned long shadow = readl(port) & ~clk_mask;
313 writel(shadow, port);
314 if (data & 1 << bit)
315 shadow |= data_mask;
316 else
317 shadow &= ~data_mask;
318 writel(shadow, port);
319 /* For FPGA: min 5.0ns (DCC) before CCLK high */
320 shadow |= clk_mask;
321 writel(shadow, port);
322}
323
324static void gpio_write_byte(struct gpio_private *priv, unsigned long *port,
325 unsigned char data)
326{
327 int i;
328
329 if (priv->write_msb)
330 for (i = 7; i >= 0; i--)
331 gpio_write_bit(port, data, i, priv->clk_mask,
332 priv->data_mask);
333 else
334 for (i = 0; i <= 7; i++)
335 gpio_write_bit(port, data, i, priv->clk_mask,
336 priv->data_mask);
337}
338
339
340static ssize_t gpio_write(struct file *file, const char __user *buf,
341 size_t count, loff_t *off)
342{
343 struct gpio_private *priv = file->private_data;
344 unsigned long flags;
345 ssize_t retval = count;
346 /* Only bits 0-7 may be used for write operations but allow all
347 devices except leds... */
348#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
349 if (priv->minor == GPIO_MINOR_V)
350 return -EFAULT;
351#endif
352 if (priv->minor == GPIO_MINOR_LEDS)
353 return -EFAULT;
354
355 if (priv->minor >= GPIO_MINOR_PWM0 &&
356 priv->minor <= GPIO_MINOR_LAST_PWM)
357 return -EFAULT;
358
359 if (!access_ok(VERIFY_READ, buf, count))
360 return -EFAULT;
361
362 /* It must have been configured using the IO_CFG_WRITE_MODE */
363 /* Perhaps a better error code? */
364 if (priv->clk_mask == 0 || priv->data_mask == 0)
365 return -EPERM;
366
367 D(printk(KERN_DEBUG "gpio_write: %lu to data 0x%02X clk 0x%02X "
368 "msb: %i\n",
369 count, priv->data_mask, priv->clk_mask, priv->write_msb));
370
371 spin_lock_irqsave(&gpio_lock, flags);
372
373 while (count--)
374 gpio_write_byte(priv, data_out[priv->minor], *buf++);
375
376 spin_unlock_irqrestore(&gpio_lock, flags);
377 return retval;
378}
379
380static int gpio_open(struct inode *inode, struct file *filp)
381{
382 struct gpio_private *priv;
383 int p = iminor(inode);
384
385 if (p > GPIO_MINOR_LAST_PWM ||
386 (p > GPIO_MINOR_LAST && p < GPIO_MINOR_PWM0))
387 return -EINVAL;
388
389 priv = kmalloc(sizeof(struct gpio_private), GFP_KERNEL);
390
391 if (!priv)
392 return -ENOMEM;
393
394 mutex_lock(&gpio_mutex);
395 memset(priv, 0, sizeof(*priv));
396
397 priv->minor = p;
398 filp->private_data = priv;
399
400 /* initialize the io/alarm struct, not for PWM ports though */
401 if (p <= GPIO_MINOR_LAST) {
402
403 priv->clk_mask = 0;
404 priv->data_mask = 0;
405 priv->highalarm = 0;
406 priv->lowalarm = 0;
407
408 init_waitqueue_head(&priv->alarm_wq);
409
410 /* link it into our alarmlist */
411 spin_lock_irq(&gpio_lock);
412 priv->next = alarmlist;
413 alarmlist = priv;
414 spin_unlock_irq(&gpio_lock);
415 }
416
417 mutex_unlock(&gpio_mutex);
418 return 0;
419}
420
421static int gpio_release(struct inode *inode, struct file *filp)
422{
423 struct gpio_private *p;
424 struct gpio_private *todel;
425 /* local copies while updating them: */
426 unsigned long a_high, a_low;
427
428 /* prepare to free private structure */
429 todel = filp->private_data;
430
431 /* unlink from alarmlist - only for non-PWM ports though */
432 if (todel->minor <= GPIO_MINOR_LAST) {
433 spin_lock_irq(&gpio_lock);
434 p = alarmlist;
435
436 if (p == todel)
437 alarmlist = todel->next;
438 else {
439 while (p->next != todel)
440 p = p->next;
441 p->next = todel->next;
442 }
443
444 /* Check if there are still any alarms set */
445 p = alarmlist;
446 a_high = 0;
447 a_low = 0;
448 while (p) {
449 if (p->minor == GPIO_MINOR_A) {
450#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
451 p->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
452#endif
453 a_high |= p->highalarm;
454 a_low |= p->lowalarm;
455 }
456
457 p = p->next;
458 }
459
460#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
461 /* Variable 'a_low' needs to be set here again
462 * to ensure that interrupt for virtual GPIO is handled.
463 */
464 a_low |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
465#endif
466
467 spin_unlock_irq(&gpio_lock);
468 }
469 kfree(todel);
470
471 return 0;
472}
473
474/* Main device API. ioctl's to read/set/clear bits, as well as to
475 * set alarms to wait for using a subsequent select().
476 */
477
478inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg)
479{
480 /* Set direction 0=unchanged 1=input,
481 * return mask with 1=input
482 */
483 unsigned long flags;
484 unsigned long dir_shadow;
485
486 spin_lock_irqsave(&gpio_lock, flags);
487
488 dir_shadow = readl(dir_oe[priv->minor]) &
489 ~(arg & changeable_dir[priv->minor]);
490 writel(dir_shadow, dir_oe[priv->minor]);
491
492 spin_unlock_irqrestore(&gpio_lock, flags);
493
494 if (priv->minor == GPIO_MINOR_C)
495 dir_shadow ^= 0xFFFF; /* Only 16 bits */
496#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
497 else if (priv->minor == GPIO_MINOR_V)
498 dir_shadow ^= 0xFFFF; /* Only 16 bits */
499#endif
500 else
501 dir_shadow ^= 0xFFFFFFFF; /* PA, PB and PD 32 bits */
502
503 return dir_shadow;
504
505} /* setget_input */
506
507static inline unsigned long setget_output(struct gpio_private *priv,
508 unsigned long arg)
509{
510 unsigned long flags;
511 unsigned long dir_shadow;
512
513 spin_lock_irqsave(&gpio_lock, flags);
514
515 dir_shadow = readl(dir_oe[priv->minor]) |
516 (arg & changeable_dir[priv->minor]);
517 writel(dir_shadow, dir_oe[priv->minor]);
518
519 spin_unlock_irqrestore(&gpio_lock, flags);
520 return dir_shadow;
521} /* setget_output */
522
523static long gpio_ioctl_unlocked(struct file *file,
524 unsigned int cmd, unsigned long arg)
525{
526 unsigned long flags;
527 unsigned long val;
528 unsigned long shadow;
529 struct gpio_private *priv = file->private_data;
530
531 if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE)
532 return -ENOTTY;
533
534 /* Check for special ioctl handlers first */
535
536#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
537 if (priv->minor == GPIO_MINOR_V)
538 return virtual_gpio_ioctl(file, cmd, arg);
539#endif
540
541 if (priv->minor == GPIO_MINOR_LEDS)
542 return gpio_leds_ioctl(cmd, arg);
543
544 if (priv->minor >= GPIO_MINOR_PWM0 &&
545 priv->minor <= GPIO_MINOR_LAST_PWM)
546 return gpio_pwm_ioctl(priv, cmd, arg);
547
548 switch (_IOC_NR(cmd)) {
549 case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */
550 /* Read the port. */
551 return readl(data_in[priv->minor]);
552 case IO_SETBITS:
553 spin_lock_irqsave(&gpio_lock, flags);
554 /* Set changeable bits with a 1 in arg. */
555 shadow = readl(data_out[priv->minor]) |
556 (arg & changeable_bits[priv->minor]);
557 writel(shadow, data_out[priv->minor]);
558 spin_unlock_irqrestore(&gpio_lock, flags);
559 break;
560 case IO_CLRBITS:
561 spin_lock_irqsave(&gpio_lock, flags);
562 /* Clear changeable bits with a 1 in arg. */
563 shadow = readl(data_out[priv->minor]) &
564 ~(arg & changeable_bits[priv->minor]);
565 writel(shadow, data_out[priv->minor]);
566 spin_unlock_irqrestore(&gpio_lock, flags);
567 break;
568 case IO_HIGHALARM:
569 /* Set alarm when bits with 1 in arg go high. */
570 priv->highalarm |= arg;
571 gpio_set_alarm(priv);
572 break;
573 case IO_LOWALARM:
574 /* Set alarm when bits with 1 in arg go low. */
575 priv->lowalarm |= arg;
576 gpio_set_alarm(priv);
577 break;
578 case IO_CLRALARM:
579 /* Clear alarm for bits with 1 in arg. */
580 priv->highalarm &= ~arg;
581 priv->lowalarm &= ~arg;
582 gpio_set_alarm(priv);
583 break;
584 case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */
585 /* Read direction 0=input 1=output */
586 return readl(dir_oe[priv->minor]);
587
588 case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */
589 /* Set direction 0=unchanged 1=input,
590 * return mask with 1=input
591 */
592 return setget_input(priv, arg);
593
594 case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! */
595 /* Set direction 0=unchanged 1=output,
596 * return mask with 1=output
597 */
598 return setget_output(priv, arg);
599
600 case IO_CFG_WRITE_MODE:
601 {
602 int res = -EPERM;
603 unsigned long dir_shadow, clk_mask, data_mask, write_msb;
604
605 clk_mask = arg & 0xFF;
606 data_mask = (arg >> 8) & 0xFF;
607 write_msb = (arg >> 16) & 0x01;
608
609 /* Check if we're allowed to change the bits and
610 * the direction is correct
611 */
612 spin_lock_irqsave(&gpio_lock, flags);
613 dir_shadow = readl(dir_oe[priv->minor]);
614 if ((clk_mask & changeable_bits[priv->minor]) &&
615 (data_mask & changeable_bits[priv->minor]) &&
616 (clk_mask & dir_shadow) &&
617 (data_mask & dir_shadow)) {
618 priv->clk_mask = clk_mask;
619 priv->data_mask = data_mask;
620 priv->write_msb = write_msb;
621 res = 0;
622 }
623 spin_unlock_irqrestore(&gpio_lock, flags);
624
625 return res;
626 }
627 case IO_READ_INBITS:
628 /* *arg is result of reading the input pins */
629 val = readl(data_in[priv->minor]);
630 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
631 return -EFAULT;
632 return 0;
633 case IO_READ_OUTBITS:
634 /* *arg is result of reading the output shadow */
635 val = *data_out[priv->minor];
636 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
637 return -EFAULT;
638 break;
639 case IO_SETGET_INPUT:
640 /* bits set in *arg are set to input,
641 * *arg updated with current input pins.
642 */
643 if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
644 return -EFAULT;
645 val = setget_input(priv, val);
646 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
647 return -EFAULT;
648 break;
649 case IO_SETGET_OUTPUT:
650 /* bits set in *arg are set to output,
651 * *arg updated with current output pins.
652 */
653 if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
654 return -EFAULT;
655 val = setget_output(priv, val);
656 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
657 return -EFAULT;
658 break;
659 default:
660 return -EINVAL;
661 } /* switch */
662
663 return 0;
664}
665
666static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
667{
668 long ret;
669
670 mutex_lock(&gpio_mutex);
671 ret = gpio_ioctl_unlocked(file, cmd, arg);
672 mutex_unlock(&gpio_mutex);
673
674 return ret;
675}
676
677#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
678static int virtual_gpio_ioctl(struct file *file, unsigned int cmd,
679 unsigned long arg)
680{
681 unsigned long flags;
682 unsigned short val;
683 unsigned short shadow;
684 struct gpio_private *priv = file->private_data;
685
686 switch (_IOC_NR(cmd)) {
687 case IO_SETBITS:
688 spin_lock_irqsave(&gpio_lock, flags);
689 /* Set changeable bits with a 1 in arg. */
690 i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
691 shadow |= ~readl(dir_oe[priv->minor]) |
692 (arg & changeable_bits[priv->minor]);
693 i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
694 spin_unlock_irqrestore(&gpio_lock, flags);
695 break;
696 case IO_CLRBITS:
697 spin_lock_irqsave(&gpio_lock, flags);
698 /* Clear changeable bits with a 1 in arg. */
699 i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
700 shadow |= ~readl(dir_oe[priv->minor]) &
701 ~(arg & changeable_bits[priv->minor]);
702 i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
703 spin_unlock_irqrestore(&gpio_lock, flags);
704 break;
705 case IO_HIGHALARM:
706 /* Set alarm when bits with 1 in arg go high. */
707 priv->highalarm |= arg;
708 break;
709 case IO_LOWALARM:
710 /* Set alarm when bits with 1 in arg go low. */
711 priv->lowalarm |= arg;
712 break;
713 case IO_CLRALARM:
714 /* Clear alarm for bits with 1 in arg. */
715 priv->highalarm &= ~arg;
716 priv->lowalarm &= ~arg;
717 break;
718 case IO_CFG_WRITE_MODE:
719 {
720 unsigned long dir_shadow;
721 dir_shadow = readl(dir_oe[priv->minor]);
722
723 priv->clk_mask = arg & 0xFF;
724 priv->data_mask = (arg >> 8) & 0xFF;
725 priv->write_msb = (arg >> 16) & 0x01;
726 /* Check if we're allowed to change the bits and
727 * the direction is correct
728 */
729 if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
730 (priv->data_mask & changeable_bits[priv->minor]) &&
731 (priv->clk_mask & dir_shadow) &&
732 (priv->data_mask & dir_shadow))) {
733 priv->clk_mask = 0;
734 priv->data_mask = 0;
735 return -EPERM;
736 }
737 break;
738 }
739 case IO_READ_INBITS:
740 /* *arg is result of reading the input pins */
741 val = cached_virtual_gpio_read & ~readl(dir_oe[priv->minor]);
742 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
743 return -EFAULT;
744 return 0;
745
746 case IO_READ_OUTBITS:
747 /* *arg is result of reading the output shadow */
748 i2c_read(VIRT_I2C_ADDR, (void *)&val, sizeof(val));
749 val &= readl(dir_oe[priv->minor]);
750 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
751 return -EFAULT;
752 break;
753 case IO_SETGET_INPUT:
754 {
755 /* bits set in *arg are set to input,
756 * *arg updated with current input pins.
757 */
758 unsigned short input_mask = ~readl(dir_oe[priv->minor]);
759 if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
760 return -EFAULT;
761 val = setget_input(priv, val);
762 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
763 return -EFAULT;
764 if ((input_mask & val) != input_mask) {
765 /* Input pins changed. All ports desired as input
766 * should be set to logic 1.
767 */
768 unsigned short change = input_mask ^ val;
769 i2c_read(VIRT_I2C_ADDR, (void *)&shadow,
770 sizeof(shadow));
771 shadow &= ~change;
772 shadow |= val;
773 i2c_write(VIRT_I2C_ADDR, (void *)&shadow,
774 sizeof(shadow));
775 }
776 break;
777 }
778 case IO_SETGET_OUTPUT:
779 /* bits set in *arg are set to output,
780 * *arg updated with current output pins.
781 */
782 if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
783 return -EFAULT;
784 val = setget_output(priv, val);
785 if (copy_to_user((void __user *)arg, &val, sizeof(val)))
786 return -EFAULT;
787 break;
788 default:
789 return -EINVAL;
790 } /* switch */
791 return 0;
792}
793#endif /* CONFIG_ETRAX_VIRTUAL_GPIO */
794
795static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
796{
797 unsigned char green;
798 unsigned char red;
799
800 switch (_IOC_NR(cmd)) {
801 case IO_LEDACTIVE_SET:
802 green = ((unsigned char) arg) & 1;
803 red = (((unsigned char) arg) >> 1) & 1;
804 CRIS_LED_ACTIVE_SET_G(green);
805 CRIS_LED_ACTIVE_SET_R(red);
806 break;
807
808 default:
809 return -EINVAL;
810 } /* switch */
811
812 return 0;
813}
814
815static int gpio_pwm_set_mode(unsigned long arg, int pwm_port)
816{
817 int pinmux_pwm = pinmux_pwm0 + pwm_port;
818 int mode;
819 reg_gio_rw_pwm0_ctrl rw_pwm_ctrl = {
820 .ccd_val = 0,
821 .ccd_override = regk_gio_no,
822 .mode = regk_gio_no
823 };
824 int allocstatus;
825
826 if (get_user(mode, &((struct io_pwm_set_mode *) arg)->mode))
827 return -EFAULT;
828 rw_pwm_ctrl.mode = mode;
829 if (mode != PWM_OFF)
830 allocstatus = crisv32_pinmux_alloc_fixed(pinmux_pwm);
831 else
832 allocstatus = crisv32_pinmux_dealloc_fixed(pinmux_pwm);
833 if (allocstatus)
834 return allocstatus;
835 REG_WRITE(reg_gio_rw_pwm0_ctrl, REG_ADDR(gio, regi_gio, rw_pwm0_ctrl) +
836 12 * pwm_port, rw_pwm_ctrl);
837 return 0;
838}
839
840static int gpio_pwm_set_period(unsigned long arg, int pwm_port)
841{
842 struct io_pwm_set_period periods;
843 reg_gio_rw_pwm0_var rw_pwm_widths;
844
845 if (copy_from_user(&periods, (void __user *)arg, sizeof(periods)))
846 return -EFAULT;
847 if (periods.lo > 8191 || periods.hi > 8191)
848 return -EINVAL;
849 rw_pwm_widths.lo = periods.lo;
850 rw_pwm_widths.hi = periods.hi;
851 REG_WRITE(reg_gio_rw_pwm0_var, REG_ADDR(gio, regi_gio, rw_pwm0_var) +
852 12 * pwm_port, rw_pwm_widths);
853 return 0;
854}
855
856static int gpio_pwm_set_duty(unsigned long arg, int pwm_port)
857{
858 unsigned int duty;
859 reg_gio_rw_pwm0_data rw_pwm_duty;
860
861 if (get_user(duty, &((struct io_pwm_set_duty *) arg)->duty))
862 return -EFAULT;
863 if (duty > 255)
864 return -EINVAL;
865 rw_pwm_duty.data = duty;
866 REG_WRITE(reg_gio_rw_pwm0_data, REG_ADDR(gio, regi_gio, rw_pwm0_data) +
867 12 * pwm_port, rw_pwm_duty);
868 return 0;
869}
870
871static int gpio_pwm_ioctl(struct gpio_private *priv, unsigned int cmd,
872 unsigned long arg)
873{
874 int pwm_port = priv->minor - GPIO_MINOR_PWM0;
875
876 switch (_IOC_NR(cmd)) {
877 case IO_PWM_SET_MODE:
878 return gpio_pwm_set_mode(arg, pwm_port);
879 case IO_PWM_SET_PERIOD:
880 return gpio_pwm_set_period(arg, pwm_port);
881 case IO_PWM_SET_DUTY:
882 return gpio_pwm_set_duty(arg, pwm_port);
883 default:
884 return -EINVAL;
885 }
886 return 0;
887}
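
For reference, the three PWM ioctls handled above are driven from userspace roughly as in the sketch below. This is only an illustration: the device node name and header path are assumptions, and PWM_OFF is the only mode name visible in the driver (any other io_pwm_set_mode value allocates the pin and is written to rw_pwm_ctrl.mode). Since the dispatcher only inspects _IOC_TYPE() and _IOC_NR(), requests built with _IO(ETRAXGPIO_IOCTYPE, ...) are sufficient here.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/etraxgpio.h>	/* ETRAXGPIO_IOCTYPE, IO_PWM_*, struct io_pwm_set_* (assumed path) */

static int pwm_demo(void)
{
	struct io_pwm_set_period period = { .lo = 4000, .hi = 4000 };	/* both fields must be <= 8191 */
	struct io_pwm_set_duty duty = { .duty = 128 };			/* must be <= 255 */
	struct io_pwm_set_mode mode = { .mode = PWM_OFF + 1 };		/* stand-in for any non-PWM_OFF mode */
	int fd = open("/dev/gpiopwm0", O_RDWR);				/* node name is an assumption */

	if (fd < 0)
		return -1;
	if (ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_PWM_SET_PERIOD), &period) ||
	    ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_PWM_SET_DUTY), &duty) ||
	    ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_PWM_SET_MODE), &mode)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
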
888
889static const struct file_operations gpio_fops = {
890 .owner = THIS_MODULE,
891 .poll = gpio_poll,
892 .unlocked_ioctl = gpio_ioctl,
893 .write = gpio_write,
894 .open = gpio_open,
895 .release = gpio_release,
896 .llseek = noop_llseek,
897};
898
899#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
900static void __init virtual_gpio_init(void)
901{
902 reg_gio_rw_intr_cfg intr_cfg;
903 reg_gio_rw_intr_mask intr_mask;
904 unsigned short shadow;
905
906 shadow = ~virtual_rw_pv_oe; /* Input ports should be set to logic 1 */
907 shadow |= CONFIG_ETRAX_DEF_GIO_PV_OUT;
908 i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
909
910 /* Set interrupt mask and on what state the interrupt shall trigger.
911 * For virtual gpio the interrupt shall trigger on logic '0'.
912 */
913 intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
914 intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
915
916 switch (CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN) {
917 case 0:
918 intr_cfg.pa0 = regk_gio_lo;
919 intr_mask.pa0 = regk_gio_yes;
920 break;
921 case 1:
922 intr_cfg.pa1 = regk_gio_lo;
923 intr_mask.pa1 = regk_gio_yes;
924 break;
925 case 2:
926 intr_cfg.pa2 = regk_gio_lo;
927 intr_mask.pa2 = regk_gio_yes;
928 break;
929 case 3:
930 intr_cfg.pa3 = regk_gio_lo;
931 intr_mask.pa3 = regk_gio_yes;
932 break;
933 case 4:
934 intr_cfg.pa4 = regk_gio_lo;
935 intr_mask.pa4 = regk_gio_yes;
936 break;
937 case 5:
938 intr_cfg.pa5 = regk_gio_lo;
939 intr_mask.pa5 = regk_gio_yes;
940 break;
941 case 6:
942 intr_cfg.pa6 = regk_gio_lo;
943 intr_mask.pa6 = regk_gio_yes;
944 break;
945 case 7:
946 intr_cfg.pa7 = regk_gio_lo;
947 intr_mask.pa7 = regk_gio_yes;
948 break;
949 }
950
951 REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
952 REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
953}
954#endif
955
956/* main driver initialization routine, called from mem.c */
957
958static int __init gpio_init(void)
959{
960 int res, res2;
961
962 printk(KERN_INFO "ETRAX FS GPIO driver v2.7, (c) 2003-2008 "
963 "Axis Communications AB\n");
964
965 /* do the formalities */
966
967 res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops);
968 if (res < 0) {
969 printk(KERN_ERR "gpio: couldn't get a major number.\n");
970 return res;
971 }
972
973 /* Clear all leds */
974 CRIS_LED_NETWORK_GRP0_SET(0);
975 CRIS_LED_NETWORK_GRP1_SET(0);
976 CRIS_LED_ACTIVE_SET(0);
977 CRIS_LED_DISK_READ(0);
978 CRIS_LED_DISK_WRITE(0);
979
980 res2 = request_irq(GIO_INTR_VECT, gpio_interrupt,
981 IRQF_SHARED, "gpio", &alarmlist);
982 if (res2) {
983 printk(KERN_ERR "err: irq for gpio\n");
984 return res2;
985 }
986
987 /* No IRQs by default. */
988 REG_WR_INT(gio, regi_gio, rw_intr_pins, 0);
989
990#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
991 virtual_gpio_init();
992#endif
993
994 return res;
995}
996
997/* this makes sure that gpio_init is called during kernel boot */
998
999module_init(gpio_init);
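
Taken together, the character-device interface registered above is exercised from userspace along the following lines. The sketch is illustrative only: the /dev node names and the asm/etraxgpio.h path are assumptions, requests are built with _IO() because the driver dispatches purely on _IOC_TYPE()/_IOC_NR(), and only bits permitted by the CONFIG_ETRAX_P*_CHANGEABLE_BITS / _DIR masks actually take effect.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/etraxgpio.h>	/* ETRAXGPIO_IOCTYPE and the IO_* request numbers (assumed path) */

int main(void)
{
	unsigned long mask = 1UL << 3;		/* pin 3 of the port, purely as an example */
	unsigned long in_bits;
	int fd = open("/dev/gpiob", O_RDWR);	/* minor-to-node mapping is an assumption */

	if (fd < 0)
		return 1;

	/* Turn pin 3 into an output; the driver hands back the updated direction mask. */
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETGET_OUTPUT), &mask);

	/* Drive the pin high, then low, through the shadowed dout register. */
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), 1UL << 3);
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRBITS), 1UL << 3);

	/* Read the current input pins back into in_bits. */
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_READ_INBITS), &in_bits);
	printf("din = 0x%08lx\n", in_bits);

	close(fd);
	return 0;
}
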
diff --git a/arch/cris/arch-v32/drivers/mach-fs/Makefile b/arch/cris/arch-v32/drivers/mach-fs/Makefile
index 5c6d2a2a080e..59028d0b981c 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/Makefile
+++ b/arch/cris/arch-v32/drivers/mach-fs/Makefile
@@ -3,4 +3,3 @@
 #
 
 obj-$(CONFIG_ETRAX_NANDFLASH) += nandflash.o
-obj-$(CONFIG_ETRAX_GPIO) += gpio.o
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
deleted file mode 100644
index 72968fbf814b..000000000000
--- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+++ /dev/null
@@ -1,978 +0,0 @@
1/*
2 * ETRAX CRISv32 general port I/O device
3 *
4 * Copyright (c) 1999-2006 Axis Communications AB
5 *
6 * Authors: Bjorn Wesen (initial version)
7 * Ola Knutsson (LED handling)
8 * Johan Adolfsson (read/set directions, write, port G,
9 * port to ETRAX FS).
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/ioport.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
19#include <linux/fs.h>
20#include <linux/string.h>
21#include <linux/poll.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/spinlock.h>
25#include <linux/mutex.h>
26
27#include <asm/etraxgpio.h>
28#include <hwregs/reg_map.h>
29#include <hwregs/reg_rdwr.h>
30#include <hwregs/gio_defs.h>
31#include <hwregs/intr_vect_defs.h>
32#include <asm/io.h>
33#include <asm/irq.h>
34
35#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
36#include "../i2c.h"
37
38#define VIRT_I2C_ADDR 0x40
39#endif
40
41/* The following gio ports on ETRAX FS are available:
42 * pa 8 bits, supports interrupts off, hi, low, set, posedge, negedge anyedge
43 * pb 18 bits
44 * pc 18 bits
45 * pd 18 bits
46 * pe 18 bits
47 * each port has a rw_px_dout, r_px_din and rw_px_oe register.
48 */
49
50#define GPIO_MAJOR 120 /* experimental MAJOR number */
51
52#define D(x)
53
54#if 0
55static int dp_cnt;
56#define DP(x) \
57 do { \
58 dp_cnt++; \
59 if (dp_cnt % 1000 == 0) \
60 x; \
61 } while (0)
62#else
63#define DP(x)
64#endif
65
66static DEFINE_MUTEX(gpio_mutex);
67static char gpio_name[] = "etrax gpio";
68
69#if 0
70static wait_queue_head_t *gpio_wq;
71#endif
72
73#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
74static int virtual_gpio_ioctl(struct file *file, unsigned int cmd,
75 unsigned long arg);
76#endif
77static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
78static ssize_t gpio_write(struct file *file, const char *buf, size_t count,
79 loff_t *off);
80static int gpio_open(struct inode *inode, struct file *filp);
81static int gpio_release(struct inode *inode, struct file *filp);
82static unsigned int gpio_poll(struct file *filp,
83 struct poll_table_struct *wait);
84
85/* private data per open() of this driver */
86
87struct gpio_private {
88 struct gpio_private *next;
89 /* The IO_CFG_WRITE_MODE_VALUE only supports 8 bits: */
90 unsigned char clk_mask;
91 unsigned char data_mask;
92 unsigned char write_msb;
93 unsigned char pad1;
94 /* These fields are generic */
95 unsigned long highalarm, lowalarm;
96 wait_queue_head_t alarm_wq;
97 int minor;
98};
99
100/* linked list of alarms to check for */
101
102static struct gpio_private *alarmlist;
103
104static int gpio_some_alarms; /* Set if someone uses alarm */
105static unsigned long gpio_pa_high_alarms;
106static unsigned long gpio_pa_low_alarms;
107
108static DEFINE_SPINLOCK(alarm_lock);
109
110#define NUM_PORTS (GPIO_MINOR_LAST+1)
111#define GIO_REG_RD_ADDR(reg) \
112 (volatile unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg)
113#define GIO_REG_WR_ADDR(reg) \
114 (volatile unsigned long *)(regi_gio + REG_RD_ADDR_gio_##reg)
115unsigned long led_dummy;
116#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
117static unsigned long virtual_dummy;
118static unsigned long virtual_rw_pv_oe = CONFIG_ETRAX_DEF_GIO_PV_OE;
119static unsigned short cached_virtual_gpio_read;
120#endif
121
122static volatile unsigned long *data_out[NUM_PORTS] = {
123 GIO_REG_WR_ADDR(rw_pa_dout),
124 GIO_REG_WR_ADDR(rw_pb_dout),
125 &led_dummy,
126 GIO_REG_WR_ADDR(rw_pc_dout),
127 GIO_REG_WR_ADDR(rw_pd_dout),
128 GIO_REG_WR_ADDR(rw_pe_dout),
129#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
130 &virtual_dummy,
131#endif
132};
133
134static volatile unsigned long *data_in[NUM_PORTS] = {
135 GIO_REG_RD_ADDR(r_pa_din),
136 GIO_REG_RD_ADDR(r_pb_din),
137 &led_dummy,
138 GIO_REG_RD_ADDR(r_pc_din),
139 GIO_REG_RD_ADDR(r_pd_din),
140 GIO_REG_RD_ADDR(r_pe_din),
141#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
142 &virtual_dummy,
143#endif
144};
145
146static unsigned long changeable_dir[NUM_PORTS] = {
147 CONFIG_ETRAX_PA_CHANGEABLE_DIR,
148 CONFIG_ETRAX_PB_CHANGEABLE_DIR,
149 0,
150 CONFIG_ETRAX_PC_CHANGEABLE_DIR,
151 CONFIG_ETRAX_PD_CHANGEABLE_DIR,
152 CONFIG_ETRAX_PE_CHANGEABLE_DIR,
153#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
154 CONFIG_ETRAX_PV_CHANGEABLE_DIR,
155#endif
156};
157
158static unsigned long changeable_bits[NUM_PORTS] = {
159 CONFIG_ETRAX_PA_CHANGEABLE_BITS,
160 CONFIG_ETRAX_PB_CHANGEABLE_BITS,
161 0,
162 CONFIG_ETRAX_PC_CHANGEABLE_BITS,
163 CONFIG_ETRAX_PD_CHANGEABLE_BITS,
164 CONFIG_ETRAX_PE_CHANGEABLE_BITS,
165#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
166 CONFIG_ETRAX_PV_CHANGEABLE_BITS,
167#endif
168};
169
170static volatile unsigned long *dir_oe[NUM_PORTS] = {
171 GIO_REG_WR_ADDR(rw_pa_oe),
172 GIO_REG_WR_ADDR(rw_pb_oe),
173 &led_dummy,
174 GIO_REG_WR_ADDR(rw_pc_oe),
175 GIO_REG_WR_ADDR(rw_pd_oe),
176 GIO_REG_WR_ADDR(rw_pe_oe),
177#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
178 &virtual_rw_pv_oe,
179#endif
180};
181
182
183
184static unsigned int gpio_poll(struct file *file, struct poll_table_struct *wait)
185{
186 unsigned int mask = 0;
187 struct gpio_private *priv = file->private_data;
188 unsigned long data;
189 poll_wait(file, &priv->alarm_wq, wait);
190 if (priv->minor == GPIO_MINOR_A) {
191 reg_gio_rw_intr_cfg intr_cfg;
192 unsigned long tmp;
193 unsigned long flags;
194
195 local_irq_save(flags);
196 data = REG_TYPE_CONV(unsigned long, reg_gio_r_pa_din,
197 REG_RD(gio, regi_gio, r_pa_din));
198 /* PA has support for interrupt
199 * let's activate high for those low and with highalarm set
200 */
201 intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
202
203 tmp = ~data & priv->highalarm & 0xFF;
204 if (tmp & (1 << 0))
205 intr_cfg.pa0 = regk_gio_hi;
206 if (tmp & (1 << 1))
207 intr_cfg.pa1 = regk_gio_hi;
208 if (tmp & (1 << 2))
209 intr_cfg.pa2 = regk_gio_hi;
210 if (tmp & (1 << 3))
211 intr_cfg.pa3 = regk_gio_hi;
212 if (tmp & (1 << 4))
213 intr_cfg.pa4 = regk_gio_hi;
214 if (tmp & (1 << 5))
215 intr_cfg.pa5 = regk_gio_hi;
216 if (tmp & (1 << 6))
217 intr_cfg.pa6 = regk_gio_hi;
218 if (tmp & (1 << 7))
219 intr_cfg.pa7 = regk_gio_hi;
220 /*
221 * let's activate low for those high and with lowalarm set
222 */
223 tmp = data & priv->lowalarm & 0xFF;
224 if (tmp & (1 << 0))
225 intr_cfg.pa0 = regk_gio_lo;
226 if (tmp & (1 << 1))
227 intr_cfg.pa1 = regk_gio_lo;
228 if (tmp & (1 << 2))
229 intr_cfg.pa2 = regk_gio_lo;
230 if (tmp & (1 << 3))
231 intr_cfg.pa3 = regk_gio_lo;
232 if (tmp & (1 << 4))
233 intr_cfg.pa4 = regk_gio_lo;
234 if (tmp & (1 << 5))
235 intr_cfg.pa5 = regk_gio_lo;
236 if (tmp & (1 << 6))
237 intr_cfg.pa6 = regk_gio_lo;
238 if (tmp & (1 << 7))
239 intr_cfg.pa7 = regk_gio_lo;
240
241 REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
242 local_irq_restore(flags);
243 } else if (priv->minor <= GPIO_MINOR_E)
244 data = *data_in[priv->minor];
245 else
246 return 0;
247
248 if ((data & priv->highalarm) || (~data & priv->lowalarm))
249 mask = POLLIN|POLLRDNORM;
250
251 DP(printk(KERN_DEBUG "gpio_poll ready: mask 0x%08X\n", mask));
252 return mask;
253}
254
255int etrax_gpio_wake_up_check(void)
256{
257 struct gpio_private *priv;
258 unsigned long data = 0;
259 unsigned long flags;
260 int ret = 0;
261 spin_lock_irqsave(&alarm_lock, flags);
262 priv = alarmlist;
263 while (priv) {
264#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
265 if (priv->minor == GPIO_MINOR_V)
266 data = (unsigned long)cached_virtual_gpio_read;
267 else {
268 data = *data_in[priv->minor];
269 if (priv->minor == GPIO_MINOR_A)
270 priv->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
271 }
272#else
273 data = *data_in[priv->minor];
274#endif
275 if ((data & priv->highalarm) ||
276 (~data & priv->lowalarm)) {
277 DP(printk(KERN_DEBUG
278 "etrax_gpio_wake_up_check %i\n", priv->minor));
279 wake_up_interruptible(&priv->alarm_wq);
280 ret = 1;
281 }
282 priv = priv->next;
283 }
284 spin_unlock_irqrestore(&alarm_lock, flags);
285 return ret;
286}
287
288static irqreturn_t
289gpio_poll_timer_interrupt(int irq, void *dev_id)
290{
291 if (gpio_some_alarms)
292 return IRQ_RETVAL(etrax_gpio_wake_up_check());
293 return IRQ_NONE;
294}
295
296static irqreturn_t
297gpio_pa_interrupt(int irq, void *dev_id)
298{
299 reg_gio_rw_intr_mask intr_mask;
300 reg_gio_r_masked_intr masked_intr;
301 reg_gio_rw_ack_intr ack_intr;
302 unsigned long tmp;
303 unsigned long tmp2;
304#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
305 unsigned char enable_gpiov_ack = 0;
306#endif
307
308 /* Find what PA interrupts are active */
309 masked_intr = REG_RD(gio, regi_gio, r_masked_intr);
310 tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr);
311
312 /* Find those that we have enabled */
313 spin_lock(&alarm_lock);
314 tmp &= (gpio_pa_high_alarms | gpio_pa_low_alarms);
315 spin_unlock(&alarm_lock);
316
317#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
318 /* Something changed on virtual GPIO. Interrupt is acked by
319 * reading the device.
320 */
321 if (tmp & (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN)) {
322 i2c_read(VIRT_I2C_ADDR, (void *)&cached_virtual_gpio_read,
323 sizeof(cached_virtual_gpio_read));
324 enable_gpiov_ack = 1;
325 }
326#endif
327
328 /* Ack them */
329 ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp);
330 REG_WR(gio, regi_gio, rw_ack_intr, ack_intr);
331
332 /* Disable those interrupts.. */
333 intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
334 tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask);
335 tmp2 &= ~tmp;
336#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
337 /* Do not disable interrupt on virtual GPIO. Changes on virtual
338 * pins are only noticed by an interrupt.
339 */
340 if (enable_gpiov_ack)
341 tmp2 |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
342#endif
343 intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2);
344 REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
345
346 if (gpio_some_alarms)
347 return IRQ_RETVAL(etrax_gpio_wake_up_check());
348 return IRQ_NONE;
349}
350
351
352static ssize_t gpio_write(struct file *file, const char *buf, size_t count,
353 loff_t *off)
354{
355 struct gpio_private *priv = file->private_data;
356 unsigned char data, clk_mask, data_mask, write_msb;
357 unsigned long flags;
358 unsigned long shadow;
359 volatile unsigned long *port;
360 ssize_t retval = count;
361 /* Only bits 0-7 may be used for write operations but allow all
362 devices except leds... */
363#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
364 if (priv->minor == GPIO_MINOR_V)
365 return -EFAULT;
366#endif
367 if (priv->minor == GPIO_MINOR_LEDS)
368 return -EFAULT;
369
370 if (!access_ok(VERIFY_READ, buf, count))
371 return -EFAULT;
372 clk_mask = priv->clk_mask;
373 data_mask = priv->data_mask;
374 /* It must have been configured using the IO_CFG_WRITE_MODE */
375 /* Perhaps a better error code? */
376 if (clk_mask == 0 || data_mask == 0)
377 return -EPERM;
378 write_msb = priv->write_msb;
379 D(printk(KERN_DEBUG "gpio_write: %lu to data 0x%02X clk 0x%02X "
380 "msb: %i\n", count, data_mask, clk_mask, write_msb));
381 port = data_out[priv->minor];
382
383 while (count--) {
384 int i;
385 data = *buf++;
386 if (priv->write_msb) {
387 for (i = 7; i >= 0; i--) {
388 local_irq_save(flags);
389 shadow = *port;
390 *port = shadow &= ~clk_mask;
391 if (data & 1<<i)
392 *port = shadow |= data_mask;
393 else
394 *port = shadow &= ~data_mask;
395 /* For FPGA: min 5.0ns (DCC) before CCLK high */
396 *port = shadow |= clk_mask;
397 local_irq_restore(flags);
398 }
399 } else {
400 for (i = 0; i <= 7; i++) {
401 local_irq_save(flags);
402 shadow = *port;
403 *port = shadow &= ~clk_mask;
404 if (data & 1<<i)
405 *port = shadow |= data_mask;
406 else
407 *port = shadow &= ~data_mask;
408 /* For FPGA: min 5.0ns (DCC) before CCLK high */
409 *port = shadow |= clk_mask;
410 local_irq_restore(flags);
411 }
412 }
413 }
414 return retval;
415}
416
417
418
419static int
420gpio_open(struct inode *inode, struct file *filp)
421{
422 struct gpio_private *priv;
423 int p = iminor(inode);
424
425 if (p > GPIO_MINOR_LAST)
426 return -EINVAL;
427
428 priv = kzalloc(sizeof(struct gpio_private), GFP_KERNEL);
429 if (!priv)
430 return -ENOMEM;
431
432 mutex_lock(&gpio_mutex);
433
434 priv->minor = p;
435
436 /* initialize the io/alarm struct */
437
438 priv->clk_mask = 0;
439 priv->data_mask = 0;
440 priv->highalarm = 0;
441 priv->lowalarm = 0;
442 init_waitqueue_head(&priv->alarm_wq);
443
444 filp->private_data = (void *)priv;
445
446 /* link it into our alarmlist */
447 spin_lock_irq(&alarm_lock);
448 priv->next = alarmlist;
449 alarmlist = priv;
450 spin_unlock_irq(&alarm_lock);
451
452 mutex_unlock(&gpio_mutex);
453 return 0;
454}
455
456static int
457gpio_release(struct inode *inode, struct file *filp)
458{
459 struct gpio_private *p;
460 struct gpio_private *todel;
461 /* local copies while updating them: */
462 unsigned long a_high, a_low;
463 unsigned long some_alarms;
464
465 /* unlink from alarmlist and free the private structure */
466
467 spin_lock_irq(&alarm_lock);
468 p = alarmlist;
469 todel = filp->private_data;
470
471 if (p == todel) {
472 alarmlist = todel->next;
473 } else {
474 while (p->next != todel)
475 p = p->next;
476 p->next = todel->next;
477 }
478
479 kfree(todel);
480 /* Check if there are still any alarms set */
481 p = alarmlist;
482 some_alarms = 0;
483 a_high = 0;
484 a_low = 0;
485 while (p) {
486 if (p->minor == GPIO_MINOR_A) {
487#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
488 p->lowalarm |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
489#endif
490 a_high |= p->highalarm;
491 a_low |= p->lowalarm;
492 }
493
494 if (p->highalarm | p->lowalarm)
495 some_alarms = 1;
496 p = p->next;
497 }
498
499#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
500 /* Variables 'some_alarms' and 'a_low' need to be set here again
501 * to ensure that interrupt for virtual GPIO is handled.
502 */
503 some_alarms = 1;
504 a_low |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
505#endif
506
507 gpio_some_alarms = some_alarms;
508 gpio_pa_high_alarms = a_high;
509 gpio_pa_low_alarms = a_low;
510 spin_unlock_irq(&alarm_lock);
511
512 return 0;
513}
514
515/* Main device API. ioctl's to read/set/clear bits, as well as to
516 * set alarms to wait for using a subsequent select().
517 */
518
519inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg)
520{
521 /* Set direction 0=unchanged 1=input,
522 * return mask with 1=input
523 */
524 unsigned long flags;
525 unsigned long dir_shadow;
526
527 local_irq_save(flags);
528 dir_shadow = *dir_oe[priv->minor];
529 dir_shadow &= ~(arg & changeable_dir[priv->minor]);
530 *dir_oe[priv->minor] = dir_shadow;
531 local_irq_restore(flags);
532
533 if (priv->minor == GPIO_MINOR_A)
534 dir_shadow ^= 0xFF; /* Only 8 bits */
535#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
536 else if (priv->minor == GPIO_MINOR_V)
537 dir_shadow ^= 0xFFFF; /* Only 16 bits */
538#endif
539 else
540 dir_shadow ^= 0x3FFFF; /* Only 18 bits */
541 return dir_shadow;
542
543} /* setget_input */
544
545inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg)
546{
547 unsigned long flags;
548 unsigned long dir_shadow;
549
550 local_irq_save(flags);
551 dir_shadow = *dir_oe[priv->minor];
552 dir_shadow |= (arg & changeable_dir[priv->minor]);
553 *dir_oe[priv->minor] = dir_shadow;
554 local_irq_restore(flags);
555 return dir_shadow;
556} /* setget_output */
557
558static int gpio_leds_ioctl(unsigned int cmd, unsigned long arg);
559
560static int
561gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
562{
563 unsigned long flags;
564 unsigned long val;
565 unsigned long shadow;
566 struct gpio_private *priv = file->private_data;
567 if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE)
568 return -EINVAL;
569
570#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
571 if (priv->minor == GPIO_MINOR_V)
572 return virtual_gpio_ioctl(file, cmd, arg);
573#endif
574
575 switch (_IOC_NR(cmd)) {
576 case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */
577 /* Read the port. */
578 return *data_in[priv->minor];
579 break;
580 case IO_SETBITS:
581 local_irq_save(flags);
582 /* Set changeable bits with a 1 in arg. */
583 shadow = *data_out[priv->minor];
584 shadow |= (arg & changeable_bits[priv->minor]);
585 *data_out[priv->minor] = shadow;
586 local_irq_restore(flags);
587 break;
588 case IO_CLRBITS:
589 local_irq_save(flags);
590 /* Clear changeable bits with a 1 in arg. */
591 shadow = *data_out[priv->minor];
592 shadow &= ~(arg & changeable_bits[priv->minor]);
593 *data_out[priv->minor] = shadow;
594 local_irq_restore(flags);
595 break;
596 case IO_HIGHALARM:
597 /* Set alarm when bits with 1 in arg go high. */
598 priv->highalarm |= arg;
599 spin_lock_irqsave(&alarm_lock, flags);
600 gpio_some_alarms = 1;
601 if (priv->minor == GPIO_MINOR_A)
602 gpio_pa_high_alarms |= arg;
603 spin_unlock_irqrestore(&alarm_lock, flags);
604 break;
605 case IO_LOWALARM:
606 /* Set alarm when bits with 1 in arg go low. */
607 priv->lowalarm |= arg;
608 spin_lock_irqsave(&alarm_lock, flags);
609 gpio_some_alarms = 1;
610 if (priv->minor == GPIO_MINOR_A)
611 gpio_pa_low_alarms |= arg;
612 spin_unlock_irqrestore(&alarm_lock, flags);
613 break;
614 case IO_CLRALARM:
615 /* Clear alarm for bits with 1 in arg. */
616 priv->highalarm &= ~arg;
617 priv->lowalarm &= ~arg;
618 spin_lock_irqsave(&alarm_lock, flags);
619 if (priv->minor == GPIO_MINOR_A) {
620 if (gpio_pa_high_alarms & arg ||
621 gpio_pa_low_alarms & arg)
622 /* Must update the gpio_pa_*alarms masks */
623 ;
624 }
625 spin_unlock_irqrestore(&alarm_lock, flags);
626 break;
627 case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */
628 /* Read direction 0=input 1=output */
629 return *dir_oe[priv->minor];
630 case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */
631 /* Set direction 0=unchanged 1=input,
632 * return mask with 1=input
633 */
634 return setget_input(priv, arg);
635 break;
636 case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! */
637 /* Set direction 0=unchanged 1=output,
638 * return mask with 1=output
639 */
640 return setget_output(priv, arg);
641
642 case IO_CFG_WRITE_MODE:
643 {
644 unsigned long dir_shadow;
645 dir_shadow = *dir_oe[priv->minor];
646
647 priv->clk_mask = arg & 0xFF;
648 priv->data_mask = (arg >> 8) & 0xFF;
649 priv->write_msb = (arg >> 16) & 0x01;
650 /* Check if we're allowed to change the bits and
651 * the direction is correct
652 */
653 if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
654 (priv->data_mask & changeable_bits[priv->minor]) &&
655 (priv->clk_mask & dir_shadow) &&
656 (priv->data_mask & dir_shadow))) {
657 priv->clk_mask = 0;
658 priv->data_mask = 0;
659 return -EPERM;
660 }
661 break;
662 }
663 case IO_READ_INBITS:
664 /* *arg is result of reading the input pins */
665 val = *data_in[priv->minor];
666 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
667 return -EFAULT;
668 return 0;
669 break;
670 case IO_READ_OUTBITS:
671 /* *arg is result of reading the output shadow */
672 val = *data_out[priv->minor];
673 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
674 return -EFAULT;
675 break;
676 case IO_SETGET_INPUT:
677 /* bits set in *arg are set to input,
678 * *arg updated with current input pins.
679 */
680 if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
681 return -EFAULT;
682 val = setget_input(priv, val);
683 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
684 return -EFAULT;
685 break;
686 case IO_SETGET_OUTPUT:
687 /* bits set in *arg are set to output,
688 * *arg updated with current output pins.
689 */
690 if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
691 return -EFAULT;
692 val = setget_output(priv, val);
693 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
694 return -EFAULT;
695 break;
696 default:
697 if (priv->minor == GPIO_MINOR_LEDS)
698 return gpio_leds_ioctl(cmd, arg);
699 else
700 return -EINVAL;
701 } /* switch */
702
703 return 0;
704}
705
706static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
707{
708 long ret;
709
710 mutex_lock(&gpio_mutex);
711 ret = gpio_ioctl_unlocked(file, cmd, arg);
712 mutex_unlock(&gpio_mutex);
713
714 return ret;
715}
716
717#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
718static int
719virtual_gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
720{
721 unsigned long flags;
722 unsigned short val;
723 unsigned short shadow;
724 struct gpio_private *priv = file->private_data;
725
726 switch (_IOC_NR(cmd)) {
727 case IO_SETBITS:
728 local_irq_save(flags);
729 /* Set changeable bits with a 1 in arg. */
730 i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
731 shadow |= ~*dir_oe[priv->minor];
732 shadow |= (arg & changeable_bits[priv->minor]);
733 i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
734 local_irq_restore(flags);
735 break;
736 case IO_CLRBITS:
737 local_irq_save(flags);
738 /* Clear changeable bits with a 1 in arg. */
739 i2c_read(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
740 shadow |= ~*dir_oe[priv->minor];
741 shadow &= ~(arg & changeable_bits[priv->minor]);
742 i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
743 local_irq_restore(flags);
744 break;
745 case IO_HIGHALARM:
746 /* Set alarm when bits with 1 in arg go high. */
747 priv->highalarm |= arg;
748 spin_lock(&alarm_lock);
749 gpio_some_alarms = 1;
750 spin_unlock(&alarm_lock);
751 break;
752 case IO_LOWALARM:
753 /* Set alarm when bits with 1 in arg go low. */
754 priv->lowalarm |= arg;
755 spin_lock(&alarm_lock);
756 gpio_some_alarms = 1;
757 spin_unlock(&alarm_lock);
758 break;
759 case IO_CLRALARM:
760 /* Clear alarm for bits with 1 in arg. */
761 priv->highalarm &= ~arg;
762 priv->lowalarm &= ~arg;
763 spin_lock(&alarm_lock);
764 spin_unlock(&alarm_lock);
765 break;
766 case IO_CFG_WRITE_MODE:
767 {
768 unsigned long dir_shadow;
769 dir_shadow = *dir_oe[priv->minor];
770
771 priv->clk_mask = arg & 0xFF;
772 priv->data_mask = (arg >> 8) & 0xFF;
773 priv->write_msb = (arg >> 16) & 0x01;
774 /* Check if we're allowed to change the bits and
775 * the direction is correct
776 */
777 if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
778 (priv->data_mask & changeable_bits[priv->minor]) &&
779 (priv->clk_mask & dir_shadow) &&
780 (priv->data_mask & dir_shadow))) {
781 priv->clk_mask = 0;
782 priv->data_mask = 0;
783 return -EPERM;
784 }
785 break;
786 }
787 case IO_READ_INBITS:
788 /* *arg is result of reading the input pins */
789 val = cached_virtual_gpio_read;
790 val &= ~*dir_oe[priv->minor];
791 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
792 return -EFAULT;
793 return 0;
794 break;
795 case IO_READ_OUTBITS:
796 /* *arg is result of reading the output shadow */
797 i2c_read(VIRT_I2C_ADDR, (void *)&val, sizeof(val));
798 val &= *dir_oe[priv->minor];
799 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
800 return -EFAULT;
801 break;
802 case IO_SETGET_INPUT:
803 {
804 /* bits set in *arg are set to input,
805 * *arg updated with current input pins.
806 */
807 unsigned short input_mask = ~*dir_oe[priv->minor];
808 if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
809 return -EFAULT;
810 val = setget_input(priv, val);
811 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
812 return -EFAULT;
813 if ((input_mask & val) != input_mask) {
814 /* Input pins changed. All ports desired as input
815 * should be set to logic 1.
816 */
817 unsigned short change = input_mask ^ val;
818 i2c_read(VIRT_I2C_ADDR, (void *)&shadow,
819 sizeof(shadow));
820 shadow &= ~change;
821 shadow |= val;
822 i2c_write(VIRT_I2C_ADDR, (void *)&shadow,
823 sizeof(shadow));
824 }
825 break;
826 }
827 case IO_SETGET_OUTPUT:
828 /* bits set in *arg are set to output,
829 * *arg updated with current output pins.
830 */
831 if (copy_from_user(&val, (unsigned long *)arg, sizeof(val)))
832 return -EFAULT;
833 val = setget_output(priv, val);
834 if (copy_to_user((unsigned long *)arg, &val, sizeof(val)))
835 return -EFAULT;
836 break;
837 default:
838 return -EINVAL;
839 } /* switch */
840 return 0;
841}
842#endif /* CONFIG_ETRAX_VIRTUAL_GPIO */
843
844static int
845gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
846{
847 unsigned char green;
848 unsigned char red;
849
850 switch (_IOC_NR(cmd)) {
851 case IO_LEDACTIVE_SET:
852 green = ((unsigned char) arg) & 1;
853 red = (((unsigned char) arg) >> 1) & 1;
854 CRIS_LED_ACTIVE_SET_G(green);
855 CRIS_LED_ACTIVE_SET_R(red);
856 break;
857
858 default:
859 return -EINVAL;
860 } /* switch */
861
862 return 0;
863}
864
865static const struct file_operations gpio_fops = {
866 .owner = THIS_MODULE,
867 .poll = gpio_poll,
868 .unlocked_ioctl = gpio_ioctl,
869 .write = gpio_write,
870 .open = gpio_open,
871 .release = gpio_release,
872 .llseek = noop_llseek,
873};
874
875#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
876static void
877virtual_gpio_init(void)
878{
879 reg_gio_rw_intr_cfg intr_cfg;
880 reg_gio_rw_intr_mask intr_mask;
881 unsigned short shadow;
882
883 shadow = ~virtual_rw_pv_oe; /* Input ports should be set to logic 1 */
884 shadow |= CONFIG_ETRAX_DEF_GIO_PV_OUT;
885 i2c_write(VIRT_I2C_ADDR, (void *)&shadow, sizeof(shadow));
886
887 /* Set interrupt mask and on what state the interrupt shall trigger.
888 * For virtual gpio the interrupt shall trigger on logic '0'.
889 */
890 intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
891 intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
892
893 switch (CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN) {
894 case 0:
895 intr_cfg.pa0 = regk_gio_lo;
896 intr_mask.pa0 = regk_gio_yes;
897 break;
898 case 1:
899 intr_cfg.pa1 = regk_gio_lo;
900 intr_mask.pa1 = regk_gio_yes;
901 break;
902 case 2:
903 intr_cfg.pa2 = regk_gio_lo;
904 intr_mask.pa2 = regk_gio_yes;
905 break;
906 case 3:
907 intr_cfg.pa3 = regk_gio_lo;
908 intr_mask.pa3 = regk_gio_yes;
909 break;
910 case 4:
911 intr_cfg.pa4 = regk_gio_lo;
912 intr_mask.pa4 = regk_gio_yes;
913 break;
914 case 5:
915 intr_cfg.pa5 = regk_gio_lo;
916 intr_mask.pa5 = regk_gio_yes;
917 break;
918 case 6:
919 intr_cfg.pa6 = regk_gio_lo;
920 intr_mask.pa6 = regk_gio_yes;
921 break;
922 case 7:
923 intr_cfg.pa7 = regk_gio_lo;
924 intr_mask.pa7 = regk_gio_yes;
925 break;
926 }
927
928 REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
929 REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
930
931 gpio_pa_low_alarms |= (1 << CONFIG_ETRAX_VIRTUAL_GPIO_INTERRUPT_PA_PIN);
932 gpio_some_alarms = 1;
933}
934#endif
935
936/* main driver initialization routine, called from mem.c */
937
938static __init int
939gpio_init(void)
940{
941 int res;
942
943 /* do the formalities */
944
945 res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops);
946 if (res < 0) {
947 printk(KERN_ERR "gpio: couldn't get a major number.\n");
948 return res;
949 }
950
951 /* Clear all leds */
952 CRIS_LED_NETWORK_GRP0_SET(0);
953 CRIS_LED_NETWORK_GRP1_SET(0);
954 CRIS_LED_ACTIVE_SET(0);
955 CRIS_LED_DISK_READ(0);
956 CRIS_LED_DISK_WRITE(0);
957
958 printk(KERN_INFO "ETRAX FS GPIO driver v2.5, (c) 2003-2007 "
959 "Axis Communications AB\n");
960 /* We call etrax_gpio_wake_up_check() from timer interrupt */
961 if (request_irq(TIMER0_INTR_VECT, gpio_poll_timer_interrupt,
962 IRQF_SHARED, "gpio poll", &alarmlist))
963 printk(KERN_ERR "timer0 irq for gpio\n");
964
965 if (request_irq(GIO_INTR_VECT, gpio_pa_interrupt,
966 IRQF_SHARED, "gpio PA", &alarmlist))
967 printk(KERN_ERR "PA irq for gpio\n");
968
969#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
970 virtual_gpio_init();
971#endif
972
973 return res;
974}
975
976/* this makes sure that gpio_init is called during kernel boot */
977
978module_init(gpio_init);
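
Both generations of the driver implement the same alarm pattern: a reader arms a high or low alarm on a set of bits with IO_HIGHALARM/IO_LOWALARM and then sleeps in poll()/select() until gpio_poll() reports POLLIN. A minimal, hypothetical userspace sketch (device node name and header path are assumptions):

#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/etraxgpio.h>	/* ETRAXGPIO_IOCTYPE, IO_HIGHALARM, IO_CLRALARM (assumed path) */

/* Block until bit 0 of the port goes high, or until timeout_ms elapses. */
static int wait_for_pin_high(const char *node, int timeout_ms)
{
	struct pollfd pfd;
	int ret;
	int fd = open(node, O_RDWR);		/* e.g. "/dev/gpioa"; node name is an assumption */

	if (fd < 0)
		return -1;

	/* Arm a high alarm on bit 0; gpio_poll() then reports POLLIN once it matches. */
	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_HIGHALARM), 1UL << 0);

	pfd.fd = fd;
	pfd.events = POLLIN;
	ret = poll(&pfd, 1, timeout_ms);	/* > 0: alarm condition seen; 0: timeout */

	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_CLRALARM), 1UL << 0);
	close(fd);
	return ret;
}
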
diff --git a/arch/cris/arch-v32/kernel/crisksyms.c b/arch/cris/arch-v32/kernel/crisksyms.c
index bde8d1a10cad..b0566350a840 100644
--- a/arch/cris/arch-v32/kernel/crisksyms.c
+++ b/arch/cris/arch-v32/kernel/crisksyms.c
@@ -3,7 +3,6 @@
 #include <arch/dma.h>
 #include <arch/intmem.h>
 #include <mach/pinmux.h>
-#include <arch/io.h>
 
 /* Functions for allocating DMA channels */
 EXPORT_SYMBOL(crisv32_request_dma);
@@ -20,8 +19,6 @@ EXPORT_SYMBOL(crisv32_pinmux_alloc);
 EXPORT_SYMBOL(crisv32_pinmux_alloc_fixed);
 EXPORT_SYMBOL(crisv32_pinmux_dealloc);
 EXPORT_SYMBOL(crisv32_pinmux_dealloc_fixed);
-EXPORT_SYMBOL(crisv32_io_get_name);
-EXPORT_SYMBOL(crisv32_io_get);
 
 /* Functions masking/unmasking interrupts */
 EXPORT_SYMBOL(crisv32_mask_irq);
diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c
index 02e33ebe51ec..d2f3f9c37102 100644
--- a/arch/cris/arch-v32/kernel/debugport.c
+++ b/arch/cris/arch-v32/kernel/debugport.c
@@ -77,8 +77,6 @@ static struct dbg_port *port =
 	&ports[2];
 #elif defined(CONFIG_ETRAX_DEBUG_PORT3)
 	&ports[3];
-#elif defined(CONFIG_ETRAX_DEBUG_PORT4)
-	&ports[4];
 #else
 	NULL;
 #endif
diff --git a/arch/cris/arch-v32/kernel/head.S b/arch/cris/arch-v32/kernel/head.S
index 74a66e0e3777..ea6366800df7 100644
--- a/arch/cris/arch-v32/kernel/head.S
+++ b/arch/cris/arch-v32/kernel/head.S
@@ -292,11 +292,7 @@ _no_romfs_in_flash:
 	;; For cramfs, partition starts with magic and length.
 	;; For jffs2, a jhead is prepended which contains with magic and length.
 	;; The jhead is not part of the jffs2 partition however.
-#ifndef CONFIG_ETRAXFS_SIM
 	move.d	__bss_start, $r0
-#else
-	move.d	__end, $r0
-#endif
 	move.d	[$r0], $r1
 	cmp.d	CRAMFS_MAGIC, $r1 ; cramfs magic?
 	beq	2f ; yes, jump
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 6a881e0e92b4..6de8db67cb09 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -37,7 +37,7 @@
 #define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
 #elif defined(CONFIG_ETRAX_KGDB_PORT1)
 #define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
-#elif defined(CONFIG_ETRAX_KGB_PORT2)
+#elif defined(CONFIG_ETRAX_KGDB_PORT2)
 #define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
 #elif defined(CONFIG_ETRAX_KGDB_PORT3)
 #define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
@@ -464,14 +464,14 @@ init_IRQ(void)
 		etrax_irv->v[i] = weird_irq;
 
 	np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
-	domain = irq_domain_add_legacy(np, NR_IRQS - FIRST_IRQ,
+	domain = irq_domain_add_legacy(np, NBR_INTR_VECT - FIRST_IRQ,
 				       FIRST_IRQ, FIRST_IRQ,
 				       &crisv32_irq_ops, NULL);
 	BUG_ON(!domain);
 	irq_set_default_host(domain);
 	of_node_put(np);
 
-	for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
+	for (i = FIRST_IRQ, j = 0; j < NBR_INTR_VECT; i++, j++) {
 		set_exception_vector(i, interrupt[j]);
 	}
 
diff --git a/arch/cris/arch-v32/kernel/kgdb.c b/arch/cris/arch-v32/kernel/kgdb.c
index b06813aeb120..e0fdea706eca 100644
--- a/arch/cris/arch-v32/kernel/kgdb.c
+++ b/arch/cris/arch-v32/kernel/kgdb.c
@@ -384,19 +384,11 @@ int getDebugChar(void);
 /* Serial port, writes one character. ETRAX 100 specific. from debugport.c */
 void putDebugChar(int val);
 
-/* Returns the integer equivalent of a hexadecimal character. */
-static int hex(char ch);
-
 /* Convert the memory, pointed to by mem into hexadecimal representation.
    Put the result in buf, and return a pointer to the last character
    in buf (null). */
 static char *mem2hex(char *buf, unsigned char *mem, int count);
 
-/* Convert the array, in hexadecimal representation, pointed to by buf into
-   binary representation. Put the result in mem, and return a pointer to
-   the character after the last byte written. */
-static unsigned char *hex2mem(unsigned char *mem, char *buf, int count);
-
 /* Put the content of the array, in binary representation, pointed to by buf
    into memory pointed to by mem, and return a pointer to
    the character after the last byte written. */
@@ -449,7 +441,7 @@ static char output_buffer[BUFMAX];
449/* Error and warning messages. */ 441/* Error and warning messages. */
450enum error_type 442enum error_type
451{ 443{
452 SUCCESS, E01, E02, E03, E04, E05, E06, 444 SUCCESS, E01, E02, E03, E04, E05, E06, E07, E08
453}; 445};
454 446
455static char *error_message[] = 447static char *error_message[] =
@@ -461,6 +453,8 @@ static char *error_message[] =
461 "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.", 453 "E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.",
462 "E05 Change register content - P - the register is not implemented..", 454 "E05 Change register content - P - the register is not implemented..",
463 "E06 Change memory content - M - internal error.", 455 "E06 Change memory content - M - internal error.",
456 "E07 Change register content - P - the register is not stored on the stack",
457 "E08 Invalid parameter"
464}; 458};
465 459
466/********************************** Breakpoint *******************************/ 460/********************************** Breakpoint *******************************/
@@ -539,7 +533,7 @@ gdb_cris_strtol(const char *s, char **endptr, int base)
539/********************************* Register image ****************************/ 533/********************************* Register image ****************************/
540 534
541/* Write a value to a specified register in the register image of the current 535/* Write a value to a specified register in the register image of the current
542 thread. Returns status code SUCCESS, E02 or E05. */ 536 thread. Returns status code SUCCESS, E02, E05 or E08. */
543static int 537static int
544write_register(int regno, char *val) 538write_register(int regno, char *val)
545{ 539{
@@ -547,8 +541,9 @@ write_register(int regno, char *val)
547 541
548 if (regno >= R0 && regno <= ACR) { 542 if (regno >= R0 && regno <= ACR) {
549 /* Consecutive 32-bit registers. */ 543 /* Consecutive 32-bit registers. */
550 hex2mem((unsigned char *)&reg.r0 + (regno - R0) * sizeof(unsigned int), 544 if (hex2bin((unsigned char *)&reg.r0 + (regno - R0) * sizeof(unsigned int),
551 val, sizeof(unsigned int)); 545 val, sizeof(unsigned int)))
546 status = E08;
552 547
553 } else if (regno == BZ || regno == VR || regno == WZ || regno == DZ) { 548 } else if (regno == BZ || regno == VR || regno == WZ || regno == DZ) {
554 /* Read-only registers. */ 549 /* Read-only registers. */
@@ -557,16 +552,19 @@ write_register(int regno, char *val)
557 } else if (regno == PID) { 552 } else if (regno == PID) {
558 /* 32-bit register. (Even though we already checked SRS and WZ, we cannot 553 /* 32-bit register. (Even though we already checked SRS and WZ, we cannot
559 combine this with the EXS - SPC write since SRS and WZ have different size.) */ 554 combine this with the EXS - SPC write since SRS and WZ have different size.) */
560 hex2mem((unsigned char *)&reg.pid, val, sizeof(unsigned int)); 555 if (hex2bin((unsigned char *)&reg.pid, val, sizeof(unsigned int)))
556 status = E08;
561 557
562 } else if (regno == SRS) { 558 } else if (regno == SRS) {
563 /* 8-bit register. */ 559 /* 8-bit register. */
564 hex2mem((unsigned char *)&reg.srs, val, sizeof(unsigned char)); 560 if (hex2bin((unsigned char *)&reg.srs, val, sizeof(unsigned char)))
561 status = E08;
565 562
566 } else if (regno >= EXS && regno <= SPC) { 563 } else if (regno >= EXS && regno <= SPC) {
567 /* Consecutive 32-bit registers. */ 564 /* Consecutive 32-bit registers. */
568 hex2mem((unsigned char *)&reg.exs + (regno - EXS) * sizeof(unsigned int), 565 if (hex2bin((unsigned char *)&reg.exs + (regno - EXS) * sizeof(unsigned int),
569 val, sizeof(unsigned int)); 566 val, sizeof(unsigned int)))
567 status = E08;
570 568
571 } else if (regno == PC) { 569 } else if (regno == PC) {
572 /* Pseudo-register. Treat as read-only. */ 570 /* Pseudo-register. Treat as read-only. */
@@ -574,7 +572,9 @@ write_register(int regno, char *val)
574 572
575 } else if (regno >= S0 && regno <= S15) { 573 } else if (regno >= S0 && regno <= S15) {
576 /* 32-bit registers. */ 574 /* 32-bit registers. */
577 hex2mem((unsigned char *)&sreg.s0_0 + (reg.srs * 16 * sizeof(unsigned int)) + (regno - S0) * sizeof(unsigned int), val, sizeof(unsigned int)); 575 if (hex2bin((unsigned char *)&sreg.s0_0 + (reg.srs * 16 * sizeof(unsigned int)) + (regno - S0) * sizeof(unsigned int),
576 val, sizeof(unsigned int)))
577 status = E08;
578 } else { 578 } else {
579 /* Non-existing register. */ 579 /* Non-existing register. */
580 status = E05; 580 status = E05;
@@ -630,19 +630,6 @@ read_register(char regno, unsigned int *valptr)
630} 630}
631 631
632/********************************** Packet I/O ******************************/ 632/********************************** Packet I/O ******************************/
633/* Returns the integer equivalent of a hexadecimal character. */
634static int
635hex(char ch)
636{
637 if ((ch >= 'a') && (ch <= 'f'))
638 return (ch - 'a' + 10);
639 if ((ch >= '0') && (ch <= '9'))
640 return (ch - '0');
641 if ((ch >= 'A') && (ch <= 'F'))
642 return (ch - 'A' + 10);
643 return -1;
644}
645
646/* Convert the memory, pointed to by mem into hexadecimal representation. 633/* Convert the memory, pointed to by mem into hexadecimal representation.
647 Put the result in buf, and return a pointer to the last character 634 Put the result in buf, and return a pointer to the last character
648 in buf (null). */ 635 in buf (null). */
@@ -689,22 +676,6 @@ mem2hex_nbo(char *buf, unsigned char *mem, int count)
689 return buf; 676 return buf;
690} 677}
691 678
692/* Convert the array, in hexadecimal representation, pointed to by buf into
693 binary representation. Put the result in mem, and return a pointer to
694 the character after the last byte written. */
695static unsigned char*
696hex2mem(unsigned char *mem, char *buf, int count)
697{
698 int i;
699 unsigned char ch;
700 for (i = 0; i < count; i++) {
701 ch = hex (*buf++) << 4;
702 ch = ch + hex (*buf++);
703 *mem++ = ch;
704 }
705 return mem;
706}
707
708/* Put the content of the array, in binary representation, pointed to by buf 679/* Put the content of the array, in binary representation, pointed to by buf
709 into memory pointed to by mem, and return a pointer to the character after 680 into memory pointed to by mem, and return a pointer to the character after
710 the last byte written. 681 the last byte written.
@@ -763,8 +734,8 @@ getpacket(char *buffer)
763 buffer[count] = 0; 734 buffer[count] = 0;
764 735
765 if (ch == '#') { 736 if (ch == '#') {
766 xmitcsum = hex(getDebugChar()) << 4; 737 xmitcsum = hex_to_bin(getDebugChar()) << 4;
767 xmitcsum += hex(getDebugChar()); 738 xmitcsum += hex_to_bin(getDebugChar());
768 if (checksum != xmitcsum) { 739 if (checksum != xmitcsum) {
769 /* Wrong checksum */ 740 /* Wrong checksum */
770 putDebugChar('-'); 741 putDebugChar('-');
@@ -1304,14 +1275,17 @@ handle_exception(int sigval)
1304 /* Write registers. GXX..XX 1275 /* Write registers. GXX..XX
1305 Each byte of register data is described by two hex digits. 1276 Each byte of register data is described by two hex digits.
1306 Success: OK 1277 Success: OK
1307 Failure: void. */ 1278 Failure: E08. */
1308 /* General and special registers. */ 1279 /* General and special registers. */
1309 hex2mem((char *)&reg, &input_buffer[1], sizeof(registers)); 1280 if (hex2bin((char *)&reg, &input_buffer[1], sizeof(registers)))
1281 gdb_cris_strcpy(output_buffer, error_message[E08]);
1310 /* Support registers. */ 1282 /* Support registers. */
1311 hex2mem((char *)&sreg + (reg.srs * 16 * sizeof(unsigned int)), 1283 else if (hex2bin((char *)&sreg + (reg.srs * 16 * sizeof(unsigned int)),
1312 &input_buffer[1] + sizeof(registers), 1284 &input_buffer[1] + sizeof(registers),
1313 16 * sizeof(unsigned int)); 1285 16 * sizeof(unsigned int)))
1314 gdb_cris_strcpy(output_buffer, "OK"); 1286 gdb_cris_strcpy(output_buffer, error_message[E08]);
1287 else
1288 gdb_cris_strcpy(output_buffer, "OK");
1315 break; 1289 break;
1316 1290
1317 case 'P': 1291 case 'P':
@@ -1338,6 +1312,10 @@ handle_exception(int sigval)
1338 /* Do not support non-existing registers. */ 1312 /* Do not support non-existing registers. */
1339 gdb_cris_strcpy(output_buffer, error_message[E05]); 1313 gdb_cris_strcpy(output_buffer, error_message[E05]);
1340 break; 1314 break;
1315 case E08:
1316 /* Invalid parameter. */
1317 gdb_cris_strcpy(output_buffer, error_message[E08]);
1318 break;
1341 default: 1319 default:
1342 /* Valid register number. */ 1320 /* Valid register number. */
1343 gdb_cris_strcpy(output_buffer, "OK"); 1321 gdb_cris_strcpy(output_buffer, "OK");
@@ -1380,7 +1358,7 @@ handle_exception(int sigval)
1380 AA..AA is the start address, LLLL is the number of bytes, and 1358 AA..AA is the start address, LLLL is the number of bytes, and
1381 XX..XX is the hexadecimal data. 1359 XX..XX is the hexadecimal data.
1382 Success: OK 1360 Success: OK
1383 Failure: void. */ 1361 Failure: E08. */
1384 { 1362 {
1385 char *lenptr; 1363 char *lenptr;
1386 char *dataptr; 1364 char *dataptr;
@@ -1389,13 +1367,15 @@ handle_exception(int sigval)
1389 int len = gdb_cris_strtol(lenptr+1, &dataptr, 16); 1367 int len = gdb_cris_strtol(lenptr+1, &dataptr, 16);
1390 if (*lenptr == ',' && *dataptr == ':') { 1368 if (*lenptr == ',' && *dataptr == ':') {
1391 if (input_buffer[0] == 'M') { 1369 if (input_buffer[0] == 'M') {
1392 hex2mem(addr, dataptr + 1, len); 1370 if (hex2bin(addr, dataptr + 1, len))
1371 gdb_cris_strcpy(output_buffer, error_message[E08]);
1372 else
1373 gdb_cris_strcpy(output_buffer, "OK");
1393 } else /* X */ { 1374 } else /* X */ {
1394 bin2mem(addr, dataptr + 1, len); 1375 bin2mem(addr, dataptr + 1, len);
1376 gdb_cris_strcpy(output_buffer, "OK");
1395 } 1377 }
1396 gdb_cris_strcpy(output_buffer, "OK"); 1378 } else {
1397 }
1398 else {
1399 gdb_cris_strcpy(output_buffer, error_message[E06]); 1379 gdb_cris_strcpy(output_buffer, error_message[E06]);
1400 } 1380 }
1401 } 1381 }
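The kgdb rework above drops the file-local hex()/hex2mem() helpers in favour of the generic hex_to_bin()/hex2bin() from lib/hexdump.c, which also report malformed input, so the 'G', 'P' and 'M' handlers can answer E08 instead of silently writing garbage. A minimal sketch of the resulting pattern, with an illustrative helper name rather than the stub's exact code:

    #include <linux/kernel.h>       /* hex2bin() */
    #include <linux/errno.h>

    /* Decode a hex string into a register or memory image and
     * propagate bad digits back to the packet handler. */
    static int decode_hex(u8 *dst, const char *hex, size_t bytes)
    {
            if (hex2bin(dst, hex, bytes))   /* non-zero on bad input */
                    return -EINVAL;         /* handler reports E08 */
            return 0;
    }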
diff --git a/arch/cris/arch-v32/kernel/setup.c b/arch/cris/arch-v32/kernel/setup.c
index cd1865d68b2e..fe50287aa928 100644
--- a/arch/cris/arch-v32/kernel/setup.c
+++ b/arch/cris/arch-v32/kernel/setup.c
@@ -129,10 +129,6 @@ static struct i2c_board_info __initdata i2c_info[] = {
129#ifdef CONFIG_RTC_DRV_PCF8563 129#ifdef CONFIG_RTC_DRV_PCF8563
130 {I2C_BOARD_INFO("pcf8563", 0x51)}, 130 {I2C_BOARD_INFO("pcf8563", 0x51)},
131#endif 131#endif
132#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
133 {I2C_BOARD_INFO("vgpio", 0x20)},
134 {I2C_BOARD_INFO("vgpio", 0x21)},
135#endif
136 {I2C_BOARD_INFO("pca9536", 0x41)}, 132 {I2C_BOARD_INFO("pca9536", 0x41)},
137 {I2C_BOARD_INFO("fnp300", 0x40)}, 133 {I2C_BOARD_INFO("fnp300", 0x40)},
138 {I2C_BOARD_INFO("fnp300", 0x42)}, 134 {I2C_BOARD_INFO("fnp300", 0x42)},
@@ -146,10 +142,6 @@ static struct i2c_board_info __initdata i2c_info2[] = {
146 {I2C_BOARD_INFO("tmp100", 0x4C)}, 142 {I2C_BOARD_INFO("tmp100", 0x4C)},
147 {I2C_BOARD_INFO("tmp100", 0x4D)}, 143 {I2C_BOARD_INFO("tmp100", 0x4D)},
148 {I2C_BOARD_INFO("tmp100", 0x4E)}, 144 {I2C_BOARD_INFO("tmp100", 0x4E)},
149#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
150 {I2C_BOARD_INFO("vgpio", 0x20)},
151 {I2C_BOARD_INFO("vgpio", 0x21)},
152#endif
153 {I2C_BOARD_INFO("pca9536", 0x41)}, 145 {I2C_BOARD_INFO("pca9536", 0x41)},
154 {I2C_BOARD_INFO("fnp300", 0x40)}, 146 {I2C_BOARD_INFO("fnp300", 0x40)},
155 {I2C_BOARD_INFO("fnp300", 0x42)}, 147 {I2C_BOARD_INFO("fnp300", 0x42)},
diff --git a/arch/cris/arch-v32/mach-a3/Makefile b/arch/cris/arch-v32/mach-a3/Makefile
index 18a227196a41..0cc6eebacbed 100644
--- a/arch/cris/arch-v32/mach-a3/Makefile
+++ b/arch/cris/arch-v32/mach-a3/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5obj-y := dma.o pinmux.o io.o arbiter.o 5obj-y := dma.o pinmux.o arbiter.o
6 6
7clean: 7clean:
8 8
diff --git a/arch/cris/arch-v32/mach-a3/io.c b/arch/cris/arch-v32/mach-a3/io.c
deleted file mode 100644
index 090ceb99ef0b..000000000000
--- a/arch/cris/arch-v32/mach-a3/io.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * Helper functions for I/O pins.
3 *
4 * Copyright (c) 2005-2007 Axis Communications AB.
5 */
6
7#include <linux/types.h>
8#include <linux/errno.h>
9#include <linux/init.h>
10#include <linux/string.h>
11#include <linux/ctype.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <asm/io.h>
15#include <mach/pinmux.h>
16#include <hwregs/gio_defs.h>
17
18struct crisv32_ioport crisv32_ioports[] = {
19 {
20 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_oe),
21 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_dout),
22 (unsigned long *)REG_ADDR(gio, regi_gio, r_pa_din),
23 32
24 },
25 {
26 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_oe),
27 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_dout),
28 (unsigned long *)REG_ADDR(gio, regi_gio, r_pb_din),
29 32
30 },
31 {
32 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_oe),
33 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_dout),
34 (unsigned long *)REG_ADDR(gio, regi_gio, r_pc_din),
35 16
36 },
37};
38
39#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports)
40
41struct crisv32_iopin crisv32_led_net0_green;
42struct crisv32_iopin crisv32_led_net0_red;
43struct crisv32_iopin crisv32_led2_green;
44struct crisv32_iopin crisv32_led2_red;
45struct crisv32_iopin crisv32_led3_green;
46struct crisv32_iopin crisv32_led3_red;
47
48/* Dummy port used when green LED and red LED is on the same bit */
49static unsigned long io_dummy;
50static struct crisv32_ioport dummy_port = {
51 &io_dummy,
52 &io_dummy,
53 &io_dummy,
54 32
55};
56static struct crisv32_iopin dummy_led = {
57 &dummy_port,
58 0
59};
60
61static int __init crisv32_io_init(void)
62{
63 int ret = 0;
64
65 u32 i;
66
67 /* Locks *should* be dynamically initialized. */
68 for (i = 0; i < ARRAY_SIZE(crisv32_ioports); i++)
69 spin_lock_init(&crisv32_ioports[i].lock);
70 spin_lock_init(&dummy_port.lock);
71
72 /* Initialize LEDs */
73#if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO))
74 ret += crisv32_io_get_name(&crisv32_led_net0_green,
75 CONFIG_ETRAX_LED_G_NET0);
76 crisv32_io_set_dir(&crisv32_led_net0_green, crisv32_io_dir_out);
77 if (strcmp(CONFIG_ETRAX_LED_G_NET0, CONFIG_ETRAX_LED_R_NET0)) {
78 ret += crisv32_io_get_name(&crisv32_led_net0_red,
79 CONFIG_ETRAX_LED_R_NET0);
80 crisv32_io_set_dir(&crisv32_led_net0_red, crisv32_io_dir_out);
81 } else
82 crisv32_led_net0_red = dummy_led;
83#endif
84
85 ret += crisv32_io_get_name(&crisv32_led2_green, CONFIG_ETRAX_V32_LED2G);
86 ret += crisv32_io_get_name(&crisv32_led2_red, CONFIG_ETRAX_V32_LED2R);
87 ret += crisv32_io_get_name(&crisv32_led3_green, CONFIG_ETRAX_V32_LED3G);
88 ret += crisv32_io_get_name(&crisv32_led3_red, CONFIG_ETRAX_V32_LED3R);
89
90 crisv32_io_set_dir(&crisv32_led2_green, crisv32_io_dir_out);
91 crisv32_io_set_dir(&crisv32_led2_red, crisv32_io_dir_out);
92 crisv32_io_set_dir(&crisv32_led3_green, crisv32_io_dir_out);
93 crisv32_io_set_dir(&crisv32_led3_red, crisv32_io_dir_out);
94
95 return ret;
96}
97
98__initcall(crisv32_io_init);
99
100int crisv32_io_get(struct crisv32_iopin *iopin,
101 unsigned int port, unsigned int pin)
102{
103 if (port > NBR_OF_PORTS)
104 return -EINVAL;
105 if (port > crisv32_ioports[port].pin_count)
106 return -EINVAL;
107
108 iopin->bit = 1 << pin;
109 iopin->port = &crisv32_ioports[port];
110
111 if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
112 return -EIO;
113
114 return 0;
115}
116
117int crisv32_io_get_name(struct crisv32_iopin *iopin, const char *name)
118{
119 int port;
120 int pin;
121
122 if (toupper(*name) == 'P')
123 name++;
124
125 if (toupper(*name) < 'A' || toupper(*name) > 'E')
126 return -EINVAL;
127
128 port = toupper(*name) - 'A';
129 name++;
130 pin = simple_strtoul(name, NULL, 10);
131
132 if (pin < 0 || pin > crisv32_ioports[port].pin_count)
133 return -EINVAL;
134
135 iopin->bit = 1 << pin;
136 iopin->port = &crisv32_ioports[port];
137
138 if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
139 return -EIO;
140
141 return 0;
142}
143
144#ifdef CONFIG_PCI
145/* PCI I/O access stuff */
146struct cris_io_operations *cris_iops = NULL;
147EXPORT_SYMBOL(cris_iops);
148#endif
149
diff --git a/arch/cris/arch-v32/mach-fs/Kconfig b/arch/cris/arch-v32/mach-fs/Kconfig
index 774de82abef6..7d1ab972bc0f 100644
--- a/arch/cris/arch-v32/mach-fs/Kconfig
+++ b/arch/cris/arch-v32/mach-fs/Kconfig
@@ -192,25 +192,6 @@ config ETRAX_DEF_GIO_PE_OUT
192 Configures the initial data for the general port E bits. Most 192 Configures the initial data for the general port E bits. Most
193 products should use 00000 here. 193 products should use 00000 here.
194 194
195config ETRAX_DEF_GIO_PV_OE
196 hex "GIO_PV_OE"
197 depends on ETRAX_VIRTUAL_GPIO
198 default "0000"
199 help
200 Configures the direction of virtual general port V bits. 1 is out,
201 0 is in. This is often totally different depending on the product
202 used. These bits are used for all kinds of stuff. If you don't know
203 what to use, it is always safe to put all as inputs, although
204 floating inputs isn't good.
205
206config ETRAX_DEF_GIO_PV_OUT
207 hex "GIO_PV_OUT"
208 depends on ETRAX_VIRTUAL_GPIO
209 default "0000"
210 help
211 Configures the initial data for the virtual general port V bits.
212 Most products should use 0000 here.
213
214endmenu 195endmenu
215 196
216endif 197endif
diff --git a/arch/cris/arch-v32/mach-fs/Makefile b/arch/cris/arch-v32/mach-fs/Makefile
index 18a227196a41..0cc6eebacbed 100644
--- a/arch/cris/arch-v32/mach-fs/Makefile
+++ b/arch/cris/arch-v32/mach-fs/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5obj-y := dma.o pinmux.o io.o arbiter.o 5obj-y := dma.o pinmux.o arbiter.o
6 6
7clean: 7clean:
8 8
diff --git a/arch/cris/arch-v32/mach-fs/io.c b/arch/cris/arch-v32/mach-fs/io.c
deleted file mode 100644
index a6958661fa8e..000000000000
--- a/arch/cris/arch-v32/mach-fs/io.c
+++ /dev/null
@@ -1,191 +0,0 @@
1/*
2 * Helper functions for I/O pins.
3 *
4 * Copyright (c) 2004-2007 Axis Communications AB.
5 */
6
7#include <linux/types.h>
8#include <linux/errno.h>
9#include <linux/init.h>
10#include <linux/string.h>
11#include <linux/ctype.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <asm/io.h>
15#include <mach/pinmux.h>
16#include <hwregs/gio_defs.h>
17
18#ifndef DEBUG
19#define DEBUG(x)
20#endif
21
22struct crisv32_ioport crisv32_ioports[] = {
23 {
24 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_oe),
25 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_dout),
26 (unsigned long *)REG_ADDR(gio, regi_gio, r_pa_din),
27 8
28 },
29 {
30 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_oe),
31 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_dout),
32 (unsigned long *)REG_ADDR(gio, regi_gio, r_pb_din),
33 18
34 },
35 {
36 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_oe),
37 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_dout),
38 (unsigned long *)REG_ADDR(gio, regi_gio, r_pc_din),
39 18
40 },
41 {
42 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pd_oe),
43 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pd_dout),
44 (unsigned long *)REG_ADDR(gio, regi_gio, r_pd_din),
45 18
46 },
47 {
48 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pe_oe),
49 (unsigned long *)REG_ADDR(gio, regi_gio, rw_pe_dout),
50 (unsigned long *)REG_ADDR(gio, regi_gio, r_pe_din),
51 18
52 }
53};
54
55#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports)
56
57struct crisv32_iopin crisv32_led_net0_green;
58struct crisv32_iopin crisv32_led_net0_red;
59struct crisv32_iopin crisv32_led_net1_green;
60struct crisv32_iopin crisv32_led_net1_red;
61struct crisv32_iopin crisv32_led2_green;
62struct crisv32_iopin crisv32_led2_red;
63struct crisv32_iopin crisv32_led3_green;
64struct crisv32_iopin crisv32_led3_red;
65
66/* Dummy port used when green LED and red LED is on the same bit */
67static unsigned long io_dummy;
68static struct crisv32_ioport dummy_port = {
69 &io_dummy,
70 &io_dummy,
71 &io_dummy,
72 18
73};
74static struct crisv32_iopin dummy_led = {
75 &dummy_port,
76 0
77};
78
79static int __init crisv32_io_init(void)
80{
81 int ret = 0;
82
83 u32 i;
84
85 /* Locks *should* be dynamically initialized. */
86 for (i = 0; i < ARRAY_SIZE(crisv32_ioports); i++)
87 spin_lock_init(&crisv32_ioports[i].lock);
88 spin_lock_init(&dummy_port.lock);
89
90 /* Initialize LEDs */
91#if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO))
92 ret +=
93 crisv32_io_get_name(&crisv32_led_net0_green,
94 CONFIG_ETRAX_LED_G_NET0);
95 crisv32_io_set_dir(&crisv32_led_net0_green, crisv32_io_dir_out);
96 if (strcmp(CONFIG_ETRAX_LED_G_NET0, CONFIG_ETRAX_LED_R_NET0)) {
97 ret +=
98 crisv32_io_get_name(&crisv32_led_net0_red,
99 CONFIG_ETRAX_LED_R_NET0);
100 crisv32_io_set_dir(&crisv32_led_net0_red, crisv32_io_dir_out);
101 } else
102 crisv32_led_net0_red = dummy_led;
103#endif
104
105#ifdef CONFIG_ETRAX_NBR_LED_GRP_TWO
106 ret +=
107 crisv32_io_get_name(&crisv32_led_net1_green,
108 CONFIG_ETRAX_LED_G_NET1);
109 crisv32_io_set_dir(&crisv32_led_net1_green, crisv32_io_dir_out);
110 if (strcmp(CONFIG_ETRAX_LED_G_NET1, CONFIG_ETRAX_LED_R_NET1)) {
111 crisv32_io_get_name(&crisv32_led_net1_red,
112 CONFIG_ETRAX_LED_R_NET1);
113 crisv32_io_set_dir(&crisv32_led_net1_red, crisv32_io_dir_out);
114 } else
115 crisv32_led_net1_red = dummy_led;
116#endif
117
118 ret += crisv32_io_get_name(&crisv32_led2_green, CONFIG_ETRAX_V32_LED2G);
119 ret += crisv32_io_get_name(&crisv32_led2_red, CONFIG_ETRAX_V32_LED2R);
120 ret += crisv32_io_get_name(&crisv32_led3_green, CONFIG_ETRAX_V32_LED3G);
121 ret += crisv32_io_get_name(&crisv32_led3_red, CONFIG_ETRAX_V32_LED3R);
122
123 crisv32_io_set_dir(&crisv32_led2_green, crisv32_io_dir_out);
124 crisv32_io_set_dir(&crisv32_led2_red, crisv32_io_dir_out);
125 crisv32_io_set_dir(&crisv32_led3_green, crisv32_io_dir_out);
126 crisv32_io_set_dir(&crisv32_led3_red, crisv32_io_dir_out);
127
128 return ret;
129}
130
131__initcall(crisv32_io_init);
132
133int crisv32_io_get(struct crisv32_iopin *iopin,
134 unsigned int port, unsigned int pin)
135{
136 if (port > NBR_OF_PORTS)
137 return -EINVAL;
138 if (port > crisv32_ioports[port].pin_count)
139 return -EINVAL;
140
141 iopin->bit = 1 << pin;
142 iopin->port = &crisv32_ioports[port];
143
144 /* Only allocate pinmux gpiopins if port != PORT_A (port 0) */
145 /* NOTE! crisv32_pinmux_alloc thinks PORT_B is port 0 */
146 if (port != 0 && crisv32_pinmux_alloc(port - 1, pin, pin, pinmux_gpio))
147 return -EIO;
148 DEBUG(printk(KERN_DEBUG "crisv32_io_get: Allocated pin %d on port %d\n",
149 pin, port));
150
151 return 0;
152}
153
154int crisv32_io_get_name(struct crisv32_iopin *iopin, const char *name)
155{
156 int port;
157 int pin;
158
159 if (toupper(*name) == 'P')
160 name++;
161
162 if (toupper(*name) < 'A' || toupper(*name) > 'E')
163 return -EINVAL;
164
165 port = toupper(*name) - 'A';
166 name++;
167 pin = simple_strtoul(name, NULL, 10);
168
169 if (pin < 0 || pin > crisv32_ioports[port].pin_count)
170 return -EINVAL;
171
172 iopin->bit = 1 << pin;
173 iopin->port = &crisv32_ioports[port];
174
175 /* Only allocate pinmux gpiopins if port != PORT_A (port 0) */
176 /* NOTE! crisv32_pinmux_alloc thinks PORT_B is port 0 */
177 if (port != 0 && crisv32_pinmux_alloc(port - 1, pin, pin, pinmux_gpio))
178 return -EIO;
179
180 DEBUG(printk(KERN_DEBUG
181 "crisv32_io_get_name: Allocated pin %d on port %d\n",
182 pin, port));
183
184 return 0;
185}
186
187#ifdef CONFIG_PCI
188/* PCI I/O access stuff */
189struct cris_io_operations *cris_iops = NULL;
190EXPORT_SYMBOL(cris_iops);
191#endif
diff --git a/arch/cris/boot/dts/artpec3.dtsi b/arch/cris/boot/dts/artpec3.dtsi
new file mode 100644
index 000000000000..be15be67b653
--- /dev/null
+++ b/arch/cris/boot/dts/artpec3.dtsi
@@ -0,0 +1,46 @@
1/ {
2 #address-cells = <1>;
3 #size-cells = <1>;
4 interrupt-parent = <&intc>;
5
6 cpus {
7 #address-cells = <1>;
8 #size-cells = <0>;
9
10 cpu@0 {
11 device_type = "cpu";
12 model = "axis,crisv32";
13 reg = <0>;
14 };
15 };
16
17 soc {
18 compatible = "simple-bus";
19 model = "artpec3";
20 #address-cells = <1>;
21 #size-cells = <1>;
22 ranges;
23
24 intc: interrupt-controller {
25 compatible = "axis,crisv32-intc";
26 reg = <0xb002a000 0x1000>;
27 interrupt-controller;
28 #interrupt-cells = <1>;
29 };
30
31 gio: gpio@b0020000 {
32 compatible = "axis,artpec3-gio";
33 reg = <0xb0020000 0x1000>;
34 interrupts = <61>;
35 gpio-controller;
36 #gpio-cells = <3>;
37 };
38
39 serial@b003e000 {
40 compatible = "axis,etraxfs-uart";
41 reg = <0xb003e000 0x1000>;
42 interrupts = <64>;
43 status = "disabled";
44 };
45 };
46};
diff --git a/arch/cris/boot/dts/dev88.dts b/arch/cris/boot/dts/dev88.dts
index 4fa5a3f9d0ec..b9a230d10874 100644
--- a/arch/cris/boot/dts/dev88.dts
+++ b/arch/cris/boot/dts/dev88.dts
@@ -1,5 +1,7 @@
1/dts-v1/; 1/dts-v1/;
2 2
3#include <dt-bindings/gpio/gpio.h>
4
3/include/ "etraxfs.dtsi" 5/include/ "etraxfs.dtsi"
4 6
5/ { 7/ {
@@ -15,4 +17,51 @@
15 status = "okay"; 17 status = "okay";
16 }; 18 };
17 }; 19 };
20
21 spi {
22 compatible = "spi-gpio";
23 #address-cells = <1>;
24 #size-cells = <0>;
25
26 gpio-sck = <&gio 1 0 0xd>;
27 gpio-miso = <&gio 4 0 0xd>;
28 gpio-mosi = <&gio 0 0 0xd>;
29 cs-gpios = <&gio 3 0 0xd>;
30 num-chipselects = <1>;
31
32 temp-sensor@0 {
33 compatible = "ti,lm70";
34 reg = <0>;
35
36 spi-max-frequency = <100000>;
37 };
38 };
39
40 i2c {
41 compatible = "i2c-gpio";
42 gpios = <&gio 5 0 0xd>, <&gio 6 0 0xd>;
43 i2c-gpio,delay-us = <2>;
44 #address-cells = <1>;
45 #size-cells = <0>;
46
47 rtc@51 {
48 compatible = "nxp,pcf8563";
49 reg = <0x51>;
50 };
51 };
52
53 leds {
54 compatible = "gpio-leds";
55
56 network {
57 label = "network";
58 gpios = <&gio 2 GPIO_ACTIVE_LOW 0xa>;
59 };
60
61 status {
62 label = "status";
63 gpios = <&gio 3 GPIO_ACTIVE_LOW 0xa>;
64 linux,default-trigger = "heartbeat";
65 };
66 };
18}; 67};
diff --git a/arch/cris/boot/dts/etraxfs.dtsi b/arch/cris/boot/dts/etraxfs.dtsi
index 909bcedc3565..bf1b8582d4d8 100644
--- a/arch/cris/boot/dts/etraxfs.dtsi
+++ b/arch/cris/boot/dts/etraxfs.dtsi
@@ -28,6 +28,14 @@
28 #interrupt-cells = <1>; 28 #interrupt-cells = <1>;
29 }; 29 };
30 30
31 gio: gpio@b001a000 {
32 compatible = "axis,etraxfs-gio";
33 reg = <0xb001a000 0x1000>;
34 interrupts = <50>;
35 gpio-controller;
36 #gpio-cells = <3>;
37 };
38
31 serial@b00260000 { 39 serial@b00260000 {
32 compatible = "axis,etraxfs-uart"; 40 compatible = "axis,etraxfs-uart";
33 reg = <0xb0026000 0x1000>; 41 reg = <0xb0026000 0x1000>;
diff --git a/arch/cris/boot/dts/include/dt-bindings b/arch/cris/boot/dts/include/dt-bindings
new file mode 120000
index 000000000000..08c00e4972fa
--- /dev/null
+++ b/arch/cris/boot/dts/include/dt-bindings
@@ -0,0 +1 @@
../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/cris/boot/dts/p1343.dts b/arch/cris/boot/dts/p1343.dts
new file mode 100644
index 000000000000..fab7bdbd0f15
--- /dev/null
+++ b/arch/cris/boot/dts/p1343.dts
@@ -0,0 +1,76 @@
1/dts-v1/;
2
3#include <dt-bindings/gpio/gpio.h>
4#include <dt-bindings/input/input.h>
5
6/include/ "artpec3.dtsi"
7
8/ {
9 model = "Axis P1343 Network Camera";
10 compatible = "axis,p1343";
11
12 aliases {
13 serial0 = &uart0;
14 };
15
16 soc {
17 uart0: serial@b003e000 {
18 status = "okay";
19 };
20 };
21
22 i2c {
23 compatible = "i2c-gpio";
24 gpios = <&gio 3 0 0xa>, <&gio 2 0 0xa>;
25 i2c-gpio,delay-us = <2>;
26 #address-cells = <1>;
27 #size-cells = <0>;
28
29 rtc@51 {
30 compatible = "nxp,pcf8563";
31 reg = <0x51>;
32 };
33 };
34
35 leds {
36 compatible = "gpio-leds";
37
38 status_green {
39 label = "status:green";
40 gpios = <&gio 0 GPIO_ACTIVE_LOW 0xc>;
41 linux,default-trigger = "heartbeat";
42 };
43
44 status_red {
45 label = "status:red";
46 gpios = <&gio 1 GPIO_ACTIVE_LOW 0xc>;
47 };
48
49 network_green {
50 label = "network:green";
51 gpios = <&gio 2 GPIO_ACTIVE_LOW 0xc>;
52 };
53
54 network_red {
55 label = "network:red";
56 gpios = <&gio 3 GPIO_ACTIVE_LOW 0xc>;
57 };
58
59 power_red {
60 label = "power:red";
61 gpios = <&gio 4 GPIO_ACTIVE_LOW 0xc>;
62 };
63 };
64
65 gpio_keys {
66 compatible = "gpio-keys";
67 #address-cells = <1>;
68 #size-cells = <0>;
69
70 activity-button@0 {
71 label = "Activity Button";
72 linux,code = <KEY_FN>;
73 gpios = <&gio 13 GPIO_ACTIVE_LOW 0xd>;
74 };
75 };
76};
diff --git a/arch/cris/boot/rescue/head_v10.S b/arch/cris/boot/rescue/head_v10.S
index af55df0994b3..1c05492f3eb2 100644
--- a/arch/cris/boot/rescue/head_v10.S
+++ b/arch/cris/boot/rescue/head_v10.S
@@ -281,9 +281,6 @@ wait_ser:
281#ifdef CONFIG_ETRAX_PB_LEDS 281#ifdef CONFIG_ETRAX_PB_LEDS
282 move.b $r2, [R_PORT_PB_DATA] 282 move.b $r2, [R_PORT_PB_DATA]
283#endif 283#endif
284#ifdef CONFIG_ETRAX_90000000_LEDS
285 move.b $r2, [0x90000000]
286#endif
287#endif 284#endif
288 285
289 ;; check if we got something on the serial port 286 ;; check if we got something on the serial port
diff --git a/arch/cris/include/arch-v32/arch/io.h b/arch/cris/include/arch-v32/arch/io.h
deleted file mode 100644
index adc5484351bf..000000000000
--- a/arch/cris/include/arch-v32/arch/io.h
+++ /dev/null
@@ -1,140 +0,0 @@
1#ifndef _ASM_ARCH_CRIS_IO_H
2#define _ASM_ARCH_CRIS_IO_H
3
4#include <linux/spinlock.h>
5#include <hwregs/reg_map.h>
6#include <hwregs/reg_rdwr.h>
7#include <hwregs/gio_defs.h>
8
9enum crisv32_io_dir
10{
11 crisv32_io_dir_in = 0,
12 crisv32_io_dir_out = 1
13};
14
15struct crisv32_ioport
16{
17 volatile unsigned long *oe;
18 volatile unsigned long *data;
19 volatile unsigned long *data_in;
20 unsigned int pin_count;
21 spinlock_t lock;
22};
23
24struct crisv32_iopin
25{
26 struct crisv32_ioport* port;
27 int bit;
28};
29
30extern struct crisv32_ioport crisv32_ioports[];
31
32extern struct crisv32_iopin crisv32_led1_green;
33extern struct crisv32_iopin crisv32_led1_red;
34extern struct crisv32_iopin crisv32_led2_green;
35extern struct crisv32_iopin crisv32_led2_red;
36extern struct crisv32_iopin crisv32_led3_green;
37extern struct crisv32_iopin crisv32_led3_red;
38
39extern struct crisv32_iopin crisv32_led_net0_green;
40extern struct crisv32_iopin crisv32_led_net0_red;
41extern struct crisv32_iopin crisv32_led_net1_green;
42extern struct crisv32_iopin crisv32_led_net1_red;
43
44static inline void crisv32_io_set(struct crisv32_iopin *iopin, int val)
45{
46 unsigned long flags;
47 spin_lock_irqsave(&iopin->port->lock, flags);
48
49 if (iopin->port->data) {
50 if (val)
51 *iopin->port->data |= iopin->bit;
52 else
53 *iopin->port->data &= ~iopin->bit;
54 }
55
56 spin_unlock_irqrestore(&iopin->port->lock, flags);
57}
58
59static inline void crisv32_io_set_dir(struct crisv32_iopin* iopin,
60 enum crisv32_io_dir dir)
61{
62 unsigned long flags;
63 spin_lock_irqsave(&iopin->port->lock, flags);
64
65 if (iopin->port->oe) {
66 if (dir == crisv32_io_dir_in)
67 *iopin->port->oe &= ~iopin->bit;
68 else
69 *iopin->port->oe |= iopin->bit;
70 }
71
72 spin_unlock_irqrestore(&iopin->port->lock, flags);
73}
74
75static inline int crisv32_io_rd(struct crisv32_iopin* iopin)
76{
77 return ((*iopin->port->data_in & iopin->bit) ? 1 : 0);
78}
79
80int crisv32_io_get(struct crisv32_iopin* iopin,
81 unsigned int port, unsigned int pin);
82int crisv32_io_get_name(struct crisv32_iopin* iopin,
83 const char *name);
84
85#define CRIS_LED_OFF 0x00
86#define CRIS_LED_GREEN 0x01
87#define CRIS_LED_RED 0x02
88#define CRIS_LED_ORANGE (CRIS_LED_GREEN | CRIS_LED_RED)
89
90#if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO))
91#define CRIS_LED_NETWORK_GRP0_SET(x) \
92 do { \
93 CRIS_LED_NETWORK_GRP0_SET_G((x) & CRIS_LED_GREEN); \
94 CRIS_LED_NETWORK_GRP0_SET_R((x) & CRIS_LED_RED); \
95 } while (0)
96#else
97#define CRIS_LED_NETWORK_GRP0_SET(x) while (0) {}
98#endif
99
100#define CRIS_LED_NETWORK_GRP0_SET_G(x) \
101 crisv32_io_set(&crisv32_led_net0_green, !(x));
102
103#define CRIS_LED_NETWORK_GRP0_SET_R(x) \
104 crisv32_io_set(&crisv32_led_net0_red, !(x));
105
106#if defined(CONFIG_ETRAX_NBR_LED_GRP_TWO)
107#define CRIS_LED_NETWORK_GRP1_SET(x) \
108 do { \
109 CRIS_LED_NETWORK_GRP1_SET_G((x) & CRIS_LED_GREEN); \
110 CRIS_LED_NETWORK_GRP1_SET_R((x) & CRIS_LED_RED); \
111 } while (0)
112#else
113#define CRIS_LED_NETWORK_GRP1_SET(x) while (0) {}
114#endif
115
116#define CRIS_LED_NETWORK_GRP1_SET_G(x) \
117 crisv32_io_set(&crisv32_led_net1_green, !(x));
118
119#define CRIS_LED_NETWORK_GRP1_SET_R(x) \
120 crisv32_io_set(&crisv32_led_net1_red, !(x));
121
122#define CRIS_LED_ACTIVE_SET(x) \
123 do { \
124 CRIS_LED_ACTIVE_SET_G((x) & CRIS_LED_GREEN); \
125 CRIS_LED_ACTIVE_SET_R((x) & CRIS_LED_RED); \
126 } while (0)
127
128#define CRIS_LED_ACTIVE_SET_G(x) \
129 crisv32_io_set(&crisv32_led2_green, !(x));
130#define CRIS_LED_ACTIVE_SET_R(x) \
131 crisv32_io_set(&crisv32_led2_red, !(x));
132#define CRIS_LED_DISK_WRITE(x) \
133 do{\
134 crisv32_io_set(&crisv32_led3_green, !(x)); \
135 crisv32_io_set(&crisv32_led3_red, !(x)); \
136 }while(0)
137#define CRIS_LED_DISK_READ(x) \
138 crisv32_io_set(&crisv32_led3_green, !(x));
139
140#endif
diff --git a/arch/cris/include/arch-v32/arch/irq.h b/arch/cris/include/arch-v32/arch/irq.h
index 0c1b4d3a34e7..8270a1bbfdb6 100644
--- a/arch/cris/include/arch-v32/arch/irq.h
+++ b/arch/cris/include/arch-v32/arch/irq.h
@@ -4,7 +4,7 @@
4#include <hwregs/intr_vect.h> 4#include <hwregs/intr_vect.h>
5 5
6/* Number of non-cpu interrupts. */ 6/* Number of non-cpu interrupts. */
7#define NR_IRQS NBR_INTR_VECT /* Exceptions + IRQs */ 7#define NR_IRQS (NBR_INTR_VECT + 256) /* Exceptions + IRQs */
8#define FIRST_IRQ 0x31 /* Exception number for first IRQ */ 8#define FIRST_IRQ 0x31 /* Exception number for first IRQ */
9#define NR_REAL_IRQS (NBR_INTR_VECT - FIRST_IRQ) /* IRQs */ 9#define NR_REAL_IRQS (NBR_INTR_VECT - FIRST_IRQ) /* IRQs */
10#if NR_REAL_IRQS > 32 10#if NR_REAL_IRQS > 32
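The extra 256 Linux IRQ numbers above the hardware vectors presumably leave headroom for chained controllers, such as the gio GPIO blocks added elsewhere in this series, to hand out virtual IRQs through the generic irqdomain layer. Roughly, and only as an assumption about intent rather than code from this tree:

    #include <linux/irqdomain.h>

    /* Hypothetical: a GPIO bank maps its pins to Linux IRQ numbers
     * above the fixed CPU exception vectors via its own domain. */
    static unsigned int gio_pin_to_irq(struct irq_domain *gpio_domain,
                                       irq_hw_number_t pin)
    {
            return irq_create_mapping(gpio_domain, pin);
    }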
diff --git a/arch/cris/include/asm/eshlibld.h b/arch/cris/include/asm/eshlibld.h
index 10ce36cf79a9..70aa448256b0 100644
--- a/arch/cris/include/asm/eshlibld.h
+++ b/arch/cris/include/asm/eshlibld.h
@@ -45,8 +45,7 @@
45 assumed that we want to share code when debugging (exposes more 45 assumed that we want to share code when debugging (exposes more
46 trouble). */ 46 trouble). */
47#ifndef SHARE_LIB_CORE 47#ifndef SHARE_LIB_CORE
48# if (defined(__KERNEL__) || !defined(RELOC_DEBUG)) \ 48# if (defined(__KERNEL__) || !defined(RELOC_DEBUG))
49 && !defined(CONFIG_SHARE_SHLIB_CORE)
50# define SHARE_LIB_CORE 0 49# define SHARE_LIB_CORE 0
51# else 50# else
52# define SHARE_LIB_CORE 1 51# define SHARE_LIB_CORE 1
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index 752a3f45df60..cce8664d5dd6 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -2,7 +2,9 @@
2#define _ASM_CRIS_IO_H 2#define _ASM_CRIS_IO_H
3 3
4#include <asm/page.h> /* for __va, __pa */ 4#include <asm/page.h> /* for __va, __pa */
5#ifdef CONFIG_ETRAX_ARCH_V10
5#include <arch/io.h> 6#include <arch/io.h>
7#endif
6#include <asm-generic/iomap.h> 8#include <asm-generic/iomap.h>
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8 10
diff --git a/arch/cris/include/uapi/asm/etraxgpio.h b/arch/cris/include/uapi/asm/etraxgpio.h
index 461c089db765..c6e7d57c8b24 100644
--- a/arch/cris/include/uapi/asm/etraxgpio.h
+++ b/arch/cris/include/uapi/asm/etraxgpio.h
@@ -11,26 +11,6 @@
11 * g1-g7 and g25-g31 is both input and outputs but on different pins 11 * g1-g7 and g25-g31 is both input and outputs but on different pins
12 * Also note that some bits change pins depending on what interfaces 12 * Also note that some bits change pins depending on what interfaces
13 * are enabled. 13 * are enabled.
14 *
15 * For ETRAX FS (CONFIG_ETRAXFS):
16 * /dev/gpioa minor 0, 8 bit GPIO, each bit can change direction
17 * /dev/gpiob minor 1, 18 bit GPIO, each bit can change direction
18 * /dev/gpioc minor 3, 18 bit GPIO, each bit can change direction
19 * /dev/gpiod minor 4, 18 bit GPIO, each bit can change direction
20 * /dev/gpioe minor 5, 18 bit GPIO, each bit can change direction
21 * /dev/leds minor 2, Access to leds depending on kernelconfig
22 *
23 * For ARTPEC-3 (CONFIG_CRIS_MACH_ARTPEC3):
24 * /dev/gpioa minor 0, 32 bit GPIO, each bit can change direction
25 * /dev/gpiob minor 1, 32 bit GPIO, each bit can change direction
26 * /dev/gpioc minor 3, 16 bit GPIO, each bit can change direction
27 * /dev/gpiod minor 4, 32 bit GPIO, input only
28 * /dev/leds minor 2, Access to leds depending on kernelconfig
29 * /dev/pwm0 minor 16, PWM channel 0 on PA30
30 * /dev/pwm1 minor 17, PWM channel 1 on PA31
31 * /dev/pwm2 minor 18, PWM channel 2 on PB26
32 * /dev/ppwm minor 19, PPWM channel
33 *
34 */ 14 */
35#ifndef _ASM_ETRAXGPIO_H 15#ifndef _ASM_ETRAXGPIO_H
36#define _ASM_ETRAXGPIO_H 16#define _ASM_ETRAXGPIO_H
@@ -40,52 +20,12 @@
40#define ETRAXGPIO_IOCTYPE 43 20#define ETRAXGPIO_IOCTYPE 43
41 21
42/* etraxgpio _IOC_TYPE, bits 8 to 15 in ioctl cmd */ 22/* etraxgpio _IOC_TYPE, bits 8 to 15 in ioctl cmd */
43#ifdef CONFIG_ETRAX_ARCH_V10
44#define GPIO_MINOR_A 0 23#define GPIO_MINOR_A 0
45#define GPIO_MINOR_B 1 24#define GPIO_MINOR_B 1
46#define GPIO_MINOR_LEDS 2 25#define GPIO_MINOR_LEDS 2
47#define GPIO_MINOR_G 3 26#define GPIO_MINOR_G 3
48#define GPIO_MINOR_LAST 3 27#define GPIO_MINOR_LAST 3
49#define GPIO_MINOR_LAST_REAL GPIO_MINOR_LAST 28#define GPIO_MINOR_LAST_REAL GPIO_MINOR_LAST
50#endif
51
52#ifdef CONFIG_ETRAXFS
53#define GPIO_MINOR_A 0
54#define GPIO_MINOR_B 1
55#define GPIO_MINOR_LEDS 2
56#define GPIO_MINOR_C 3
57#define GPIO_MINOR_D 4
58#define GPIO_MINOR_E 5
59#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
60#define GPIO_MINOR_V 6
61#define GPIO_MINOR_LAST 6
62#else
63#define GPIO_MINOR_LAST 5
64#endif
65#define GPIO_MINOR_LAST_REAL GPIO_MINOR_LAST
66#endif
67
68#ifdef CONFIG_CRIS_MACH_ARTPEC3
69#define GPIO_MINOR_A 0
70#define GPIO_MINOR_B 1
71#define GPIO_MINOR_LEDS 2
72#define GPIO_MINOR_C 3
73#define GPIO_MINOR_D 4
74#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
75#define GPIO_MINOR_V 6
76#define GPIO_MINOR_LAST 6
77#else
78#define GPIO_MINOR_LAST 4
79#endif
80#define GPIO_MINOR_FIRST_PWM 16
81#define GPIO_MINOR_PWM0 (GPIO_MINOR_FIRST_PWM+0)
82#define GPIO_MINOR_PWM1 (GPIO_MINOR_FIRST_PWM+1)
83#define GPIO_MINOR_PWM2 (GPIO_MINOR_FIRST_PWM+2)
84#define GPIO_MINOR_PPWM (GPIO_MINOR_FIRST_PWM+3)
85#define GPIO_MINOR_LAST_PWM GPIO_MINOR_PPWM
86#define GPIO_MINOR_LAST_REAL GPIO_MINOR_LAST_PWM
87#endif
88
89 29
90 30
91/* supported ioctl _IOC_NR's */ 31/* supported ioctl _IOC_NR's */
@@ -139,101 +79,4 @@
139#define IO_SETGET_OUTPUT 0x13 /* bits set in *arg is set to output, */ 79#define IO_SETGET_OUTPUT 0x13 /* bits set in *arg is set to output, */
140 /* *arg updated with current output pins. */ 80 /* *arg updated with current output pins. */
141 81
142/* The following ioctl's are applicable to the PWM channels only */
143
144#define IO_PWM_SET_MODE 0x20
145
146enum io_pwm_mode {
147 PWM_OFF = 0, /* disabled, deallocated */
148 PWM_STANDARD = 1, /* 390 kHz, duty cycle 0..255/256 */
149 PWM_FAST = 2, /* variable freq, w/ 10ns active pulse len */
150 PWM_VARFREQ = 3, /* individually configurable high/low periods */
151 PWM_SOFT = 4 /* software generated */
152};
153
154struct io_pwm_set_mode {
155 enum io_pwm_mode mode;
156};
157
158/* Only for mode PWM_VARFREQ. Period lo/high set in increments of 10ns
159 * from 10ns (value = 0) to 81920ns (value = 8191)
160 * (Resulting frequencies range from 50 MHz (10ns + 10ns) down to
161 * 6.1 kHz (81920ns + 81920ns) at 50% duty cycle, to 12.2 kHz at min/max duty
162 * cycle (81920 + 10ns or 10ns + 81920ns, respectively).)
163 */
164#define IO_PWM_SET_PERIOD 0x21
165
166struct io_pwm_set_period {
167 unsigned int lo; /* 0..8191 */
168 unsigned int hi; /* 0..8191 */
169};
170
171/* Only for modes PWM_STANDARD and PWM_FAST.
172 * For PWM_STANDARD, set duty cycle of 390 kHz PWM output signal, from
173 * 0 (value = 0) to 255/256 (value = 255).
174 * For PWM_FAST, set duty cycle of PWM output signal from
175 * 0% (value = 0) to 100% (value = 255). Output signal in this mode
176 * is a 10ns pulse surrounded by a high or low level depending on duty
177 * cycle (except for 0% and 100% which result in a constant output).
178 * Resulting output frequency varies from 50 MHz at 50% duty cycle,
179 * down to 390 kHz at min/max duty cycle.
180 */
181#define IO_PWM_SET_DUTY 0x22
182
183struct io_pwm_set_duty {
184 int duty; /* 0..255 */
185};
186
187/* Returns information about the latest PWM pulse.
188 * lo: Length of the latest low period, in units of 10ns.
189 * hi: Length of the latest high period, in units of 10ns.
190 * cnt: Time since last detected edge, in units of 10ns.
191 *
192 * The input source to PWM is decied by IO_PWM_SET_INPUT_SRC.
193 *
194 * NOTE: All PWM devices is connected to the same input source.
195 */
196#define IO_PWM_GET_PERIOD 0x23
197
198struct io_pwm_get_period {
199 unsigned int lo;
200 unsigned int hi;
201 unsigned int cnt;
202};
203
204/* Sets the input source for the PWM input. For the src value see the
205 * register description for gio:rw_pwm_in_cfg.
206 *
207 * NOTE: All PWM devices is connected to the same input source.
208 */
209#define IO_PWM_SET_INPUT_SRC 0x24
210struct io_pwm_set_input_src {
211 unsigned int src; /* 0..7 */
212};
213
214/* Sets the duty cycles in steps of 1/256, 0 = 0%, 255 = 100% duty cycle */
215#define IO_PPWM_SET_DUTY 0x25
216
217struct io_ppwm_set_duty {
218 int duty; /* 0..255 */
219};
220
221/* Configuraton struct for the IO_PWMCLK_SET_CONFIG ioctl to configure
222 * PWM capable gpio pins:
223 */
224#define IO_PWMCLK_SETGET_CONFIG 0x26
225struct gpio_pwmclk_conf {
226 unsigned int gpiopin; /* The pin number based on the opened device */
227 unsigned int baseclk; /* The base clock to use, or sw will select one close*/
228 unsigned int low; /* The number of low periods of the baseclk */
229 unsigned int high; /* The number of high periods of the baseclk */
230};
231
232/* Examples:
233 * To get a symmetric 12 MHz clock without knowing anything about the hardware:
234 * baseclk = 12000000, low = 0, high = 0
235 * To just get info of current setting:
236 * baseclk = 0, low = 0, high = 0, the values will be updated by driver.
237 */
238
239#endif 82#endif
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index e704f81f85cc..31b4bd288cad 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -18,7 +18,6 @@
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/fasttimer.h> 19#include <asm/fasttimer.h>
20 20
21extern unsigned long get_cmos_time(void);
22extern void __Udiv(void); 21extern void __Udiv(void);
23extern void __Umod(void); 22extern void __Umod(void);
24extern void __Div(void); 23extern void __Div(void);
@@ -30,7 +29,6 @@ extern void __negdi2(void);
30extern void iounmap(volatile void * __iomem); 29extern void iounmap(volatile void * __iomem);
31 30
32/* Platform dependent support */ 31/* Platform dependent support */
33EXPORT_SYMBOL(get_cmos_time);
34EXPORT_SYMBOL(loops_per_usec); 32EXPORT_SYMBOL(loops_per_usec);
35 33
36/* Math functions */ 34/* Math functions */
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index 7780d379522f..2dda6da71521 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -39,31 +39,6 @@
39extern unsigned long loops_per_jiffy; /* init/main.c */ 39extern unsigned long loops_per_jiffy; /* init/main.c */
40unsigned long loops_per_usec; 40unsigned long loops_per_usec;
41 41
42int set_rtc_mmss(unsigned long nowtime)
43{
44 D(printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime));
45 return 0;
46}
47
48/* grab the time from the RTC chip */
49unsigned long get_cmos_time(void)
50{
51 return 0;
52}
53
54
55int update_persistent_clock(struct timespec now)
56{
57 return set_rtc_mmss(now.tv_sec);
58}
59
60void read_persistent_clock(struct timespec *ts)
61{
62 ts->tv_sec = 0;
63 ts->tv_nsec = 0;
64}
65
66
67extern void cris_profile_sample(struct pt_regs* regs); 42extern void cris_profile_sample(struct pt_regs* regs);
68 43
69void 44void
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 0da689def4cc..64f02d451aa8 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -32,8 +32,8 @@
32 */ 32 */
33 33
34#define ATOMIC_INIT(i) { (i) } 34#define ATOMIC_INIT(i) { (i) }
35#define atomic_read(v) ACCESS_ONCE((v)->counter) 35#define atomic_read(v) READ_ONCE((v)->counter)
36#define atomic_set(v, i) (((v)->counter) = (i)) 36#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
37 37
38static inline int atomic_inc_return(atomic_t *v) 38static inline int atomic_inc_return(atomic_t *v)
39{ 39{
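This hunk, and the matching atomic.h hunks for the other architectures below, switch the plain-access fallbacks to READ_ONCE()/WRITE_ONCE(), which force a single, non-torn access and stop the compiler from caching or re-fetching the value. A hedged illustration of why that matters for atomic_read(), as a generic example rather than code from these ports:

    #include <linux/atomic.h>
    #include <asm/processor.h>      /* cpu_relax() */

    /* Without READ_ONCE() in atomic_read(), the compiler could hoist
     * the load out of the loop and spin on a stale value forever. */
    static void wait_for_flag(atomic_t *flag)
    {
            while (!atomic_read(flag))
                    cpu_relax();
    }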
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 702ee539f87d..4435a445ae7e 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -11,8 +11,8 @@
11 11
12#define ATOMIC_INIT(i) { (i) } 12#define ATOMIC_INIT(i) { (i) }
13 13
14#define atomic_read(v) ACCESS_ONCE((v)->counter) 14#define atomic_read(v) READ_ONCE((v)->counter)
15#define atomic_set(v, i) (((v)->counter) = i) 15#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18 18
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 811d61f6422d..55696c4100d4 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -48,7 +48,7 @@ static inline void atomic_set(atomic_t *v, int new)
48 * 48 *
49 * Assumes all word reads on our architecture are atomic. 49 * Assumes all word reads on our architecture are atomic.
50 */ 50 */
51#define atomic_read(v) ((v)->counter) 51#define atomic_read(v) READ_ONCE((v)->counter)
52 52
53/** 53/**
54 * atomic_xchg - atomic 54 * atomic_xchg - atomic
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index be4beeb77d57..8dfb5f6f6c35 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,11 +21,11 @@
21#define ATOMIC_INIT(i) { (i) } 21#define ATOMIC_INIT(i) { (i) }
22#define ATOMIC64_INIT(i) { (i) } 22#define ATOMIC64_INIT(i) { (i) }
23 23
24#define atomic_read(v) ACCESS_ONCE((v)->counter) 24#define atomic_read(v) READ_ONCE((v)->counter)
25#define atomic64_read(v) ACCESS_ONCE((v)->counter) 25#define atomic64_read(v) READ_ONCE((v)->counter)
26 26
27#define atomic_set(v,i) (((v)->counter) = (i)) 27#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
28#define atomic64_set(v,i) (((v)->counter) = (i)) 28#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
29 29
30#define ATOMIC_OP(op, c_op) \ 30#define ATOMIC_OP(op, c_op) \
31static __inline__ int \ 31static __inline__ int \
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 99c96a5e6016..db73390568c8 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
11 11
12 12
13 13
14#define NR_syscalls 321 /* length of syscall table */ 14#define NR_syscalls 322 /* length of syscall table */
15 15
16/* 16/*
17 * The following defines stop scripts/checksyscalls.sh from complaining about 17 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 98e94e19a5a0..9038726e7d26 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -334,5 +334,6 @@
334#define __NR_execveat 1342 334#define __NR_execveat 1342
335#define __NR_userfaultfd 1343 335#define __NR_userfaultfd 1343
336#define __NR_membarrier 1344 336#define __NR_membarrier 1344
337#define __NR_kcmp 1345
337 338
338#endif /* _UAPI_ASM_IA64_UNISTD_H */ 339#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 37cc7a65cd3e..dcd97f84d065 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1770,5 +1770,6 @@ sys_call_table:
1770 data8 sys_execveat 1770 data8 sys_execveat
1771 data8 sys_userfaultfd 1771 data8 sys_userfaultfd
1772 data8 sys_membarrier 1772 data8 sys_membarrier
1773 data8 sys_kcmp // 1345
1773 1774
1774 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1775 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
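The three ia64 hunks above wire up sys_kcmp as syscall 1345; the table entry, __NR_kcmp and the NR_syscalls bump have to move together, otherwise the .org guard at the end of the table catches the mismatch. A hedged user-space sketch of the new call, assuming <linux/kcmp.h> provides KCMP_FILE and the updated headers provide __NR_kcmp (the helper name is made up):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/kcmp.h>

    /* kcmp() returns 0 when the two objects are identical, a positive
     * ordering code otherwise, and -1 (via syscall()) on error. */
    static int same_file(pid_t pid1, int fd1, pid_t pid2, int fd2)
    {
            long ret = syscall(__NR_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);

            if (ret < 0)
                    return -1;
            return ret == 0;
    }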
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 025e2a170493..ea35160d632b 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -28,7 +28,7 @@
28 * 28 *
29 * Atomically reads the value of @v. 29 * Atomically reads the value of @v.
30 */ 30 */
31#define atomic_read(v) ACCESS_ONCE((v)->counter) 31#define atomic_read(v) READ_ONCE((v)->counter)
32 32
33/** 33/**
34 * atomic_set - set atomic variable 34 * atomic_set - set atomic variable
@@ -37,7 +37,7 @@
37 * 37 *
38 * Atomically sets the value of @v to @i. 38 * Atomically sets the value of @v to @i.
39 */ 39 */
40#define atomic_set(v,i) (((v)->counter) = (i)) 40#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
41 41
42#ifdef CONFIG_CHIP_M32700_TS1 42#ifdef CONFIG_CHIP_M32700_TS1
43#define __ATOMIC_CLOBBER , "r4" 43#define __ATOMIC_CLOBBER , "r4"
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 039fac120cc0..4858178260f9 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -17,8 +17,8 @@
17 17
18#define ATOMIC_INIT(i) { (i) } 18#define ATOMIC_INIT(i) { (i) }
19 19
20#define atomic_read(v) ACCESS_ONCE((v)->counter) 20#define atomic_read(v) READ_ONCE((v)->counter)
21#define atomic_set(v, i) (((v)->counter) = i) 21#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
22 22
23/* 23/*
24 * The ColdFire parts cannot do some immediate to memory operations, 24 * The ColdFire parts cannot do some immediate to memory operations,
diff --git a/arch/m68k/sun3/idprom.c b/arch/m68k/sun3/idprom.c
index c86ac37d1983..cfe9aa422343 100644
--- a/arch/m68k/sun3/idprom.c
+++ b/arch/m68k/sun3/idprom.c
@@ -125,8 +125,5 @@ void __init idprom_init(void)
125 125
126 display_system_type(idprom->id_machtype); 126 display_system_type(idprom->id_machtype);
127 127
128 printk("Ethernet address: %x:%x:%x:%x:%x:%x\n", 128 printk("Ethernet address: %pM\n", idprom->id_ethaddr);
129 idprom->id_ethaddr[0], idprom->id_ethaddr[1],
130 idprom->id_ethaddr[2], idprom->id_ethaddr[3],
131 idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
132} 129}
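The idprom cleanup above relies on the kernel's %pM printk extension, which prints a 6-byte buffer as a colon-separated MAC address. A minimal illustration with a made-up helper name:

    #include <linux/kernel.h>

    static void show_mac(const u8 *addr)    /* addr points at 6 bytes */
    {
            pr_info("Ethernet address: %pM\n", addr);
    }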
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 21c4c268b86c..a62581815624 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -3,7 +3,7 @@
3 3
4#define ATOMIC_INIT(i) { (i) } 4#define ATOMIC_INIT(i) { (i) }
5 5
6#define atomic_set(v, i) ((v)->counter = (i)) 6#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
7 7
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9 9
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index f8efe380fe8b..0295d9b8d5bf 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -10,7 +10,7 @@
10 10
11static inline int atomic_read(const atomic_t *v) 11static inline int atomic_read(const atomic_t *v)
12{ 12{
13 return (v)->counter; 13 return READ_ONCE((v)->counter);
14} 14}
15 15
16/* 16/*
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 0352bc8d56b3..4f9eb0576884 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1094,7 +1094,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
1094 unsigned int pin; 1094 unsigned int pin;
1095 unsigned int trigger; 1095 unsigned int trigger;
1096 1096
1097 if (d->of_node != node) 1097 if (irq_domain_get_of_node(d) != node)
1098 return -EINVAL; 1098 return -EINVAL;
1099 1099
1100 if (intsize < 2) 1100 if (intsize < 2)
@@ -2163,7 +2163,7 @@ static int octeon_irq_cib_map(struct irq_domain *d,
2163 2163
2164 if (hw >= host_data->max_bits) { 2164 if (hw >= host_data->max_bits) {
 2165 pr_err("ERROR: %s mapping %u is too big!\n", 2165 pr_err("ERROR: %s mapping %u is too big!\n",
2166 d->of_node->name, (unsigned)hw); 2166 irq_domain_get_of_node(d)->name, (unsigned)hw);
2167 return -EINVAL; 2167 return -EINVAL;
2168 } 2168 }
2169 2169
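The octeon hunks switch from dereferencing d->of_node directly to the irq_domain_get_of_node() accessor, so drivers keep working when the domain's node pointer is hidden behind the fwnode representation. Roughly, as an illustrative helper rather than this driver's code:

    #include <linux/irqdomain.h>
    #include <linux/of.h>

    static bool domain_matches(struct irq_domain *d, struct device_node *node)
    {
            return irq_domain_get_of_node(d) == node;
    }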
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
index 642b50946943..8b7429127a1d 100644
--- a/arch/mips/configs/pistachio_defconfig
+++ b/arch/mips/configs/pistachio_defconfig
@@ -257,7 +257,6 @@ CONFIG_MMC=y
257CONFIG_MMC_BLOCK_MINORS=16 257CONFIG_MMC_BLOCK_MINORS=16
258CONFIG_MMC_TEST=m 258CONFIG_MMC_TEST=m
259CONFIG_MMC_DW=y 259CONFIG_MMC_DW=y
260CONFIG_MMC_DW_IDMAC=y
261CONFIG_NEW_LEDS=y 260CONFIG_NEW_LEDS=y
262CONFIG_LEDS_CLASS=y 261CONFIG_LEDS_CLASS=y
263CONFIG_RTC_CLASS=y 262CONFIG_RTC_CLASS=y
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 4c42fd9af777..f82d3af07931 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -30,7 +30,7 @@
30 * 30 *
31 * Atomically reads the value of @v. 31 * Atomically reads the value of @v.
32 */ 32 */
33#define atomic_read(v) ACCESS_ONCE((v)->counter) 33#define atomic_read(v) READ_ONCE((v)->counter)
34 34
35/* 35/*
36 * atomic_set - set atomic variable 36 * atomic_set - set atomic variable
@@ -39,7 +39,7 @@
39 * 39 *
40 * Atomically sets the value of @v to @i. 40 * Atomically sets the value of @v to @i.
41 */ 41 */
42#define atomic_set(v, i) ((v)->counter = (i)) 42#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
43 43
44#define ATOMIC_OP(op, c_op, asm_op) \ 44#define ATOMIC_OP(op, c_op, asm_op) \
45static __inline__ void atomic_##op(int i, atomic_t * v) \ 45static __inline__ void atomic_##op(int i, atomic_t * v) \
@@ -315,14 +315,14 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
315 * @v: pointer of type atomic64_t 315 * @v: pointer of type atomic64_t
316 * 316 *
317 */ 317 */
318#define atomic64_read(v) ACCESS_ONCE((v)->counter) 318#define atomic64_read(v) READ_ONCE((v)->counter)
319 319
320/* 320/*
321 * atomic64_set - set atomic variable 321 * atomic64_set - set atomic variable
322 * @v: pointer of type atomic64_t 322 * @v: pointer of type atomic64_t
323 * @i: required value 323 * @i: required value
324 */ 324 */
325#define atomic64_set(v, i) ((v)->counter = (i)) 325#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
326 326
327#define ATOMIC64_OP(op, c_op, asm_op) \ 327#define ATOMIC64_OP(op, c_op, asm_op) \
328static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 328static __inline__ void atomic64_##op(long i, atomic64_t * v) \
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 2e52cbd20ceb..7a584e0bf933 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -12,6 +12,4 @@ obj-y := sead3-lcd.o sead3-display.o sead3-init.o \
12 sead3-int.o sead3-platform.o sead3-reset.o \ 12 sead3-int.o sead3-platform.o sead3-reset.o \
13 sead3-setup.o sead3-time.o 13 sead3-setup.o sead3-time.o
14 14
15obj-y += leds-sead3.o
16
17obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o 15obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 375e59140c9c..ce318d5ab23b 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -34,7 +34,7 @@
34 * 34 *
35 * Atomically reads the value of @v. Note that the guaranteed 35 * Atomically reads the value of @v. Note that the guaranteed
36 */ 36 */
37#define atomic_read(v) (ACCESS_ONCE((v)->counter)) 37#define atomic_read(v) READ_ONCE((v)->counter)
38 38
39/** 39/**
40 * atomic_set - set atomic variable 40 * atomic_set - set atomic variable
@@ -43,7 +43,7 @@
43 * 43 *
44 * Atomically sets the value of @v to @i. Note that the guaranteed 44 * Atomically sets the value of @v to @i. Note that the guaranteed
45 */ 45 */
46#define atomic_set(v, i) (((v)->counter) = (i)) 46#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
47 47
48#define ATOMIC_OP(op) \ 48#define ATOMIC_OP(op) \
49static inline void atomic_##op(int i, atomic_t *v) \ 49static inline void atomic_##op(int i, atomic_t *v) \
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 2536965d00ea..1d109990a022 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -67,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
67 67
68static __inline__ int atomic_read(const atomic_t *v) 68static __inline__ int atomic_read(const atomic_t *v)
69{ 69{
70 return ACCESS_ONCE((v)->counter); 70 return READ_ONCE((v)->counter);
71} 71}
72 72
73/* exported interface */ 73/* exported interface */
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 0dc42c5082b7..5f8229e24fe6 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,7 +3,6 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <asm/reg.h>
7 6
8/* bytes per L1 cache line */ 7/* bytes per L1 cache line */
9#if defined(CONFIG_8xx) || defined(CONFIG_403GCX) 8#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
40}; 39};
41 40
42extern struct ppc64_caches ppc64_caches; 41extern struct ppc64_caches ppc64_caches;
43
44static inline void logmpp(u64 x)
45{
46 asm volatile(PPC_LOGMPP(R1) : : "r" (x));
47}
48
49#endif /* __powerpc64__ && ! __ASSEMBLY__ */ 42#endif /* __powerpc64__ && ! __ASSEMBLY__ */
50 43
51#if defined(__ASSEMBLY__) 44#if defined(__ASSEMBLY__)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 827a38d7a9db..887c259556df 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -297,8 +297,6 @@ struct kvmppc_vcore {
297 u32 arch_compat; 297 u32 arch_compat;
298 ulong pcr; 298 ulong pcr;
299 ulong dpdes; /* doorbell state (POWER8) */ 299 ulong dpdes; /* doorbell state (POWER8) */
300 void *mpp_buffer; /* Micro Partition Prefetch buffer */
301 bool mpp_buffer_is_valid;
302 ulong conferring_threads; 300 ulong conferring_threads;
303}; 301};
304 302
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 790f5d1d9a46..7ab04fc59e24 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -141,7 +141,6 @@
141#define PPC_INST_ISEL 0x7c00001e 141#define PPC_INST_ISEL 0x7c00001e
142#define PPC_INST_ISEL_MASK 0xfc00003e 142#define PPC_INST_ISEL_MASK 0xfc00003e
143#define PPC_INST_LDARX 0x7c0000a8 143#define PPC_INST_LDARX 0x7c0000a8
144#define PPC_INST_LOGMPP 0x7c0007e4
145#define PPC_INST_LSWI 0x7c0004aa 144#define PPC_INST_LSWI 0x7c0004aa
146#define PPC_INST_LSWX 0x7c00042a 145#define PPC_INST_LSWX 0x7c00042a
147#define PPC_INST_LWARX 0x7c000028 146#define PPC_INST_LWARX 0x7c000028
@@ -285,20 +284,6 @@
285#define __PPC_EH(eh) 0 284#define __PPC_EH(eh) 0
286#endif 285#endif
287 286
288/* POWER8 Micro Partition Prefetch (MPP) parameters */
289/* Address mask is common for LOGMPP instruction and MPPR SPR */
290#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
291
292/* Bits 60 and 61 of MPP SPR should be set to one of the following */
293/* Aborting the fetch is indeed setting 00 in the table size bits */
294#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
295#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
296
297/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
298#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
299#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
300#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
301
302/* Deal with instructions that older assemblers aren't aware of */ 287/* Deal with instructions that older assemblers aren't aware of */
303#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ 288#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
304 __PPC_RA(a) | __PPC_RB(b)) 289 __PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
307#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ 292#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
308 ___PPC_RT(t) | ___PPC_RA(a) | \ 293 ___PPC_RT(t) | ___PPC_RA(a) | \
309 ___PPC_RB(b) | __PPC_EH(eh)) 294 ___PPC_RB(b) | __PPC_EH(eh))
310#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \
311 __PPC_RB(b))
312#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ 295#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
313 ___PPC_RT(t) | ___PPC_RA(a) | \ 296 ___PPC_RT(t) | ___PPC_RA(a) | \
314 ___PPC_RB(b) | __PPC_EH(eh)) 297 ___PPC_RB(b) | __PPC_EH(eh))
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index aa1cc5f015ee..a908ada8e0a5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -226,7 +226,6 @@
226#define CTRL_TE 0x00c00000 /* thread enable */ 226#define CTRL_TE 0x00c00000 /* thread enable */
227#define CTRL_RUNLATCH 0x1 227#define CTRL_RUNLATCH 0x1
228#define SPRN_DAWR 0xB4 228#define SPRN_DAWR 0xB4
229#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */
230#define SPRN_RPR 0xBA /* Relative Priority Register */ 229#define SPRN_RPR 0xBA /* Relative Priority Register */
231#define SPRN_CIABR 0xBB 230#define SPRN_CIABR 0xBB
232#define CIABR_PRIV 0x3 231#define CIABR_PRIV 0x3
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 59503ed98e5f..3f1472a78f39 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -303,7 +303,7 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
303 dev->coherent_dma_mask = mask; 303 dev->coherent_dma_mask = mask;
304 return 0; 304 return 0;
305} 305}
306EXPORT_SYMBOL_GPL(dma_set_coherent_mask); 306EXPORT_SYMBOL(dma_set_coherent_mask);
307 307
308#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) 308#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
309 309
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 84bf934cf748..5a753fae8265 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
1043 if (!capable(CAP_SYS_ADMIN)) 1043 if (!capable(CAP_SYS_ADMIN))
1044 return -EPERM; 1044 return -EPERM;
1045 1045
1046 if (!rtas.entry)
1047 return -EINVAL;
1048
1046 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0) 1049 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
1047 return -EFAULT; 1050 return -EFAULT;
1048 1051
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 228049786888..9c26c5a96ea2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -36,7 +36,6 @@
36 36
37#include <asm/reg.h> 37#include <asm/reg.h>
38#include <asm/cputable.h> 38#include <asm/cputable.h>
39#include <asm/cache.h>
40#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
41#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
42#include <asm/uaccess.h> 41#include <asm/uaccess.h>
@@ -75,12 +74,6 @@
75 74
76static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); 75static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
77 76
78#if defined(CONFIG_PPC_64K_PAGES)
79#define MPP_BUFFER_ORDER 0
80#elif defined(CONFIG_PPC_4K_PAGES)
81#define MPP_BUFFER_ORDER 3
82#endif
83
84static int dynamic_mt_modes = 6; 77static int dynamic_mt_modes = 6;
85module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR); 78module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
86MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)"); 79MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1455 vcore->kvm = kvm; 1448 vcore->kvm = kvm;
1456 INIT_LIST_HEAD(&vcore->preempt_list); 1449 INIT_LIST_HEAD(&vcore->preempt_list);
1457 1450
1458 vcore->mpp_buffer_is_valid = false;
1459
1460 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1461 vcore->mpp_buffer = (void *)__get_free_pages(
1462 GFP_KERNEL|__GFP_ZERO,
1463 MPP_BUFFER_ORDER);
1464
1465 return vcore; 1451 return vcore;
1466} 1452}
1467 1453
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
1894 return 1; 1880 return 1;
1895} 1881}
1896 1882
1897static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
1898{
1899 phys_addr_t phy_addr, mpp_addr;
1900
1901 phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
1902 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1903
1904 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
1905 logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
1906
1907 vc->mpp_buffer_is_valid = true;
1908}
1909
1910static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
1911{
1912 phys_addr_t phy_addr, mpp_addr;
1913
1914 phy_addr = virt_to_phys(vc->mpp_buffer);
1915 mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1916
1917 /* We must abort any in-progress save operations to ensure
1918 * the table is valid so that prefetch engine knows when to
1919 * stop prefetching. */
1920 logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
1921 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
1922}
1923
1924/* 1883/*
1925 * A list of virtual cores for each physical CPU. 1884 * A list of virtual cores for each physical CPU.
1926 * These are vcores that could run but their runner VCPU tasks are 1885 * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
2471 2430
2472 srcu_idx = srcu_read_lock(&vc->kvm->srcu); 2431 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
2473 2432
2474 if (vc->mpp_buffer_is_valid)
2475 kvmppc_start_restoring_l2_cache(vc);
2476
2477 __kvmppc_vcore_entry(); 2433 __kvmppc_vcore_entry();
2478 2434
2479 if (vc->mpp_buffer)
2480 kvmppc_start_saving_l2_cache(vc);
2481
2482 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); 2435 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
2483 2436
2484 spin_lock(&vc->lock); 2437 spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
3073{ 3026{
3074 long int i; 3027 long int i;
3075 3028
3076 for (i = 0; i < KVM_MAX_VCORES; ++i) { 3029 for (i = 0; i < KVM_MAX_VCORES; ++i)
3077 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
3078 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
3079 free_pages((unsigned long)vc->mpp_buffer,
3080 MPP_BUFFER_ORDER);
3081 }
3082 kfree(kvm->arch.vcores[i]); 3030 kfree(kvm->arch.vcores[i]);
3083 }
3084 kvm->arch.online_vcores = 0; 3031 kvm->arch.online_vcores = 0;
3085} 3032}
3086 3033
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b0382f3f1095..d1e65ce545b3 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -48,7 +48,7 @@ struct cpu_hw_events {
48 unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 48 unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
49 unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 49 unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
50 50
51 unsigned int group_flag; 51 unsigned int txn_flags;
52 int n_txn_start; 52 int n_txn_start;
53 53
54 /* BHRB bits */ 54 /* BHRB bits */
@@ -1441,7 +1441,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
1441 * skip the schedulability test here, it will be performed 1441 * skip the schedulability test here, it will be performed
1442 * at commit time(->commit_txn) as a whole 1442 * at commit time(->commit_txn) as a whole
1443 */ 1443 */
1444 if (cpuhw->group_flag & PERF_EVENT_TXN) 1444 if (cpuhw->txn_flags & PERF_PMU_TXN_ADD)
1445 goto nocheck; 1445 goto nocheck;
1446 1446
1447 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) 1447 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -1586,13 +1586,22 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
1586 * Start group events scheduling transaction 1586 * Start group events scheduling transaction
1587 * Set the flag to make pmu::enable() not perform the 1587 * Set the flag to make pmu::enable() not perform the
1588 * schedulability test, it will be performed at commit time 1588 * schedulability test, it will be performed at commit time
1589 *
1590 * We only support PERF_PMU_TXN_ADD transactions. Save the
1591 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1592 * transactions.
1589 */ 1593 */
1590static void power_pmu_start_txn(struct pmu *pmu) 1594static void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
1591{ 1595{
1592 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 1596 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1593 1597
1598 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
1599
1600 cpuhw->txn_flags = txn_flags;
1601 if (txn_flags & ~PERF_PMU_TXN_ADD)
1602 return;
1603
1594 perf_pmu_disable(pmu); 1604 perf_pmu_disable(pmu);
1595 cpuhw->group_flag |= PERF_EVENT_TXN;
1596 cpuhw->n_txn_start = cpuhw->n_events; 1605 cpuhw->n_txn_start = cpuhw->n_events;
1597} 1606}
1598 1607
@@ -1604,8 +1613,15 @@ static void power_pmu_start_txn(struct pmu *pmu)
1604static void power_pmu_cancel_txn(struct pmu *pmu) 1613static void power_pmu_cancel_txn(struct pmu *pmu)
1605{ 1614{
1606 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 1615 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1616 unsigned int txn_flags;
1617
1618 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
1619
1620 txn_flags = cpuhw->txn_flags;
1621 cpuhw->txn_flags = 0;
1622 if (txn_flags & ~PERF_PMU_TXN_ADD)
1623 return;
1607 1624
1608 cpuhw->group_flag &= ~PERF_EVENT_TXN;
1609 perf_pmu_enable(pmu); 1625 perf_pmu_enable(pmu);
1610} 1626}
1611 1627
@@ -1621,7 +1637,15 @@ static int power_pmu_commit_txn(struct pmu *pmu)
1621 1637
1622 if (!ppmu) 1638 if (!ppmu)
1623 return -EAGAIN; 1639 return -EAGAIN;
1640
1624 cpuhw = this_cpu_ptr(&cpu_hw_events); 1641 cpuhw = this_cpu_ptr(&cpu_hw_events);
1642 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
1643
1644 if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
1645 cpuhw->txn_flags = 0;
1646 return 0;
1647 }
1648
1625 n = cpuhw->n_events; 1649 n = cpuhw->n_events;
1626 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) 1650 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
1627 return -EAGAIN; 1651 return -EAGAIN;
@@ -1632,7 +1656,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
1632 for (i = cpuhw->n_txn_start; i < n; ++i) 1656 for (i = cpuhw->n_txn_start; i < n; ++i)
1633 cpuhw->event[i]->hw.config = cpuhw->events[i]; 1657 cpuhw->event[i]->hw.config = cpuhw->events[i];
1634 1658
1635 cpuhw->group_flag &= ~PERF_EVENT_TXN; 1659 cpuhw->txn_flags = 0;
1636 perf_pmu_enable(pmu); 1660 perf_pmu_enable(pmu);
1637 return 0; 1661 return 0;
1638} 1662}
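
The core-book3s.c hunks above replace the old PERF_EVENT_TXN group flag with the txn_flags word now passed to ->start_txn(), and turn cancel/commit into no-ops for anything other than PERF_PMU_TXN_ADD; the s390 and sparc hunks later in this series apply the same shape. A stripped-down, compilable sketch of that pattern, with placeholder flag values and a field standing in for perf_pmu_disable()/perf_pmu_enable():

    #include <assert.h>
    #include <stdio.h>

    /* Placeholder values for illustration; the real constants live in
     * include/linux/perf_event.h. */
    #define PERF_PMU_TXN_ADD   0x1
    #define PERF_PMU_TXN_READ  0x2

    struct cpu_hw_events { unsigned int txn_flags; int pmu_disabled; };

    static void start_txn(struct cpu_hw_events *c, unsigned int flags)
    {
        assert(!c->txn_flags);            /* no transaction already in flight */
        c->txn_flags = flags;
        if (flags & ~PERF_PMU_TXN_ADD)
            return;                       /* only ADD transactions do real work */
        c->pmu_disabled = 1;              /* stands in for perf_pmu_disable() */
    }

    static int commit_txn(struct cpu_hw_events *c)
    {
        unsigned int flags = c->txn_flags;

        assert(flags);                    /* a transaction must be in flight */
        c->txn_flags = 0;
        if (flags & ~PERF_PMU_TXN_ADD)
            return 0;                     /* nothing to schedule-test */
        c->pmu_disabled = 0;              /* stands in for perf_pmu_enable() */
        return 0;                         /* schedulability test elided */
    }

    int main(void)
    {
        struct cpu_hw_events c = { 0 };

        start_txn(&c, PERF_PMU_TXN_READ);
        printf("commit = %d, pmu_disabled = %d\n", commit_txn(&c), c.pmu_disabled);
        return 0;
    }
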
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 527c8b98e97e..9f9dfda9ed2c 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -142,6 +142,15 @@ static struct attribute_group event_long_desc_group = {
142 142
143static struct kmem_cache *hv_page_cache; 143static struct kmem_cache *hv_page_cache;
144 144
145DEFINE_PER_CPU(int, hv_24x7_txn_flags);
146DEFINE_PER_CPU(int, hv_24x7_txn_err);
147
148struct hv_24x7_hw {
149 struct perf_event *events[255];
150};
151
152DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
153
145/* 154/*
146 * request_buffer and result_buffer are not required to be 4k aligned, 155 * request_buffer and result_buffer are not required to be 4k aligned,
147 * but are not allowed to cross any 4k boundary. Aligning them to 4k is 156 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
@@ -1231,9 +1240,48 @@ static void update_event_count(struct perf_event *event, u64 now)
1231static void h_24x7_event_read(struct perf_event *event) 1240static void h_24x7_event_read(struct perf_event *event)
1232{ 1241{
1233 u64 now; 1242 u64 now;
1243 struct hv_24x7_request_buffer *request_buffer;
1244 struct hv_24x7_hw *h24x7hw;
1245 int txn_flags;
1246
1247 txn_flags = __this_cpu_read(hv_24x7_txn_flags);
1248
1249 /*
1250 * If in a READ transaction, add this counter to the list of
 1251	 * counters to read during the next HCALL (i.e., commit_txn()).
1252 * If not in a READ transaction, go ahead and make the HCALL
1253 * to read this counter by itself.
1254 */
1255
1256 if (txn_flags & PERF_PMU_TXN_READ) {
1257 int i;
1258 int ret;
1234 1259
1235 now = h_24x7_get_value(event); 1260 if (__this_cpu_read(hv_24x7_txn_err))
1236 update_event_count(event, now); 1261 return;
1262
1263 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1264
1265 ret = add_event_to_24x7_request(event, request_buffer);
1266 if (ret) {
1267 __this_cpu_write(hv_24x7_txn_err, ret);
1268 } else {
1269 /*
 1270	 * Associate the event with the HCALL request index,
1271 * so ->commit_txn() can quickly find/update count.
1272 */
1273 i = request_buffer->num_requests - 1;
1274
1275 h24x7hw = &get_cpu_var(hv_24x7_hw);
1276 h24x7hw->events[i] = event;
1277 put_cpu_var(h24x7hw);
1278 }
1279
1280 put_cpu_var(hv_24x7_reqb);
1281 } else {
1282 now = h_24x7_get_value(event);
1283 update_event_count(event, now);
1284 }
1237} 1285}
1238 1286
1239static void h_24x7_event_start(struct perf_event *event, int flags) 1287static void h_24x7_event_start(struct perf_event *event, int flags)
@@ -1255,6 +1303,117 @@ static int h_24x7_event_add(struct perf_event *event, int flags)
1255 return 0; 1303 return 0;
1256} 1304}
1257 1305
1306/*
1307 * 24x7 counters only support READ transactions. They are
 1308 * always counting and don't need/support ADD transactions.
1309 * Cache the flags, but otherwise ignore transactions that
1310 * are not PERF_PMU_TXN_READ.
1311 */
1312static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags)
1313{
1314 struct hv_24x7_request_buffer *request_buffer;
1315 struct hv_24x7_data_result_buffer *result_buffer;
1316
1317 /* We should not be called if we are already in a txn */
1318 WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags));
1319
1320 __this_cpu_write(hv_24x7_txn_flags, flags);
1321 if (flags & ~PERF_PMU_TXN_READ)
1322 return;
1323
1324 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1325 result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1326
1327 init_24x7_request(request_buffer, result_buffer);
1328
1329 put_cpu_var(hv_24x7_resb);
1330 put_cpu_var(hv_24x7_reqb);
1331}
1332
1333/*
1334 * Clean up transaction state.
1335 *
1336 * NOTE: Ignore state of request and result buffers for now.
1337 * We will initialize them during the next read/txn.
1338 */
1339static void reset_txn(void)
1340{
1341 __this_cpu_write(hv_24x7_txn_flags, 0);
1342 __this_cpu_write(hv_24x7_txn_err, 0);
1343}
1344
1345/*
1346 * 24x7 counters only support READ transactions. They are always counting
 1347 * and don't need/support ADD transactions. Clear ->txn_flags but otherwise
1348 * ignore transactions that are not of type PERF_PMU_TXN_READ.
1349 *
 1350 * For READ transactions, submit all pending 24x7 requests (i.e., requests
1351 * that were queued by h_24x7_event_read()), to the hypervisor and update
1352 * the event counts.
1353 */
1354static int h_24x7_event_commit_txn(struct pmu *pmu)
1355{
1356 struct hv_24x7_request_buffer *request_buffer;
1357 struct hv_24x7_data_result_buffer *result_buffer;
1358 struct hv_24x7_result *resb;
1359 struct perf_event *event;
1360 u64 count;
1361 int i, ret, txn_flags;
1362 struct hv_24x7_hw *h24x7hw;
1363
1364 txn_flags = __this_cpu_read(hv_24x7_txn_flags);
1365 WARN_ON_ONCE(!txn_flags);
1366
1367 ret = 0;
1368 if (txn_flags & ~PERF_PMU_TXN_READ)
1369 goto out;
1370
1371 ret = __this_cpu_read(hv_24x7_txn_err);
1372 if (ret)
1373 goto out;
1374
1375 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1376 result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1377
1378 ret = make_24x7_request(request_buffer, result_buffer);
1379 if (ret) {
1380 log_24x7_hcall(request_buffer, result_buffer, ret);
1381 goto put_reqb;
1382 }
1383
1384 h24x7hw = &get_cpu_var(hv_24x7_hw);
1385
1386 /* Update event counts from hcall */
1387 for (i = 0; i < request_buffer->num_requests; i++) {
1388 resb = &result_buffer->results[i];
1389 count = be64_to_cpu(resb->elements[0].element_data[0]);
1390 event = h24x7hw->events[i];
1391 h24x7hw->events[i] = NULL;
1392 update_event_count(event, count);
1393 }
1394
1395 put_cpu_var(hv_24x7_hw);
1396
1397put_reqb:
1398 put_cpu_var(hv_24x7_resb);
1399 put_cpu_var(hv_24x7_reqb);
1400out:
1401 reset_txn();
1402 return ret;
1403}
1404
1405/*
1406 * 24x7 counters only support READ transactions. They are always counting
 1407 * and don't need/support ADD transactions. However, regardless of type
1408 * of transaction, all we need to do is cleanup, so we don't have to check
1409 * the type of transaction.
1410 */
1411static void h_24x7_event_cancel_txn(struct pmu *pmu)
1412{
1413 WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags));
1414 reset_txn();
1415}
1416
1258static struct pmu h_24x7_pmu = { 1417static struct pmu h_24x7_pmu = {
1259 .task_ctx_nr = perf_invalid_context, 1418 .task_ctx_nr = perf_invalid_context,
1260 1419
@@ -1266,6 +1425,9 @@ static struct pmu h_24x7_pmu = {
1266 .start = h_24x7_event_start, 1425 .start = h_24x7_event_start,
1267 .stop = h_24x7_event_stop, 1426 .stop = h_24x7_event_stop,
1268 .read = h_24x7_event_read, 1427 .read = h_24x7_event_read,
1428 .start_txn = h_24x7_event_start_txn,
1429 .commit_txn = h_24x7_event_commit_txn,
1430 .cancel_txn = h_24x7_event_cancel_txn,
1269}; 1431};
1270 1432
1271static int hv_24x7_init(void) 1433static int hv_24x7_init(void)
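
The hv-24x7 hunks above add READ-transaction support: while PERF_PMU_TXN_READ is open, ->read() only queues the counter into the per-cpu request buffer, and ->commit_txn() performs a single hypervisor request and scatters the results back to the queued events. A userspace sketch of that queue-then-commit shape, with a placeholder where the real driver makes the 24x7 hcall:

    #include <stdio.h>

    #define MAX_BATCH 255                 /* mirrors the 255-entry event array above */

    struct batch {
        int n;
        int ids[MAX_BATCH];               /* which counters to fetch */
        long long *out[MAX_BATCH];        /* where each result should land */
    };

    /* ->read() under PERF_PMU_TXN_READ: just remember what to fetch later. */
    static void queue_read(struct batch *b, int counter_id, long long *dest)
    {
        b->ids[b->n] = counter_id;
        b->out[b->n] = dest;
        b->n++;
    }

    /* Placeholder for the single batched hypervisor request made at commit. */
    static long long fetch_counter(int counter_id)
    {
        return 1000 + counter_id;
    }

    /* ->commit_txn(): resolve every queued counter and update its event. */
    static void commit(struct batch *b)
    {
        for (int i = 0; i < b->n; i++)
            *b->out[i] = fetch_counter(b->ids[i]);
        b->n = 0;
    }

    int main(void)
    {
        struct batch b = { 0 };
        long long c0 = 0, c1 = 0;

        queue_read(&b, 0, &c0);
        queue_read(&b, 1, &c1);
        commit(&b);
        printf("%lld %lld\n", c0, c1);
        return 0;
    }
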
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 396351db601b..7d5e295255b7 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -676,6 +676,9 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
676 if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) 676 if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
677 return -1; 677 return -1;
678 678
679 if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
680 return -1;
681
679 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) { 682 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
680 pmu_bhrb_filter |= POWER8_MMCRA_IFM1; 683 pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
681 return pmu_bhrb_filter; 684 return pmu_bhrb_filter;
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index e0e68a1c0d3c..aed7714495c1 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -327,7 +327,7 @@ static void axon_msi_shutdown(struct platform_device *device)
327 u32 tmp; 327 u32 tmp;
328 328
329 pr_devel("axon_msi: disabling %s\n", 329 pr_devel("axon_msi: disabling %s\n",
330 msic->irq_domain->of_node->full_name); 330 irq_domain_get_of_node(msic->irq_domain)->full_name);
331 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); 331 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
332 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; 332 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
333 msic_dcr_write(msic, MSIC_CTRL_REG, tmp); 333 msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 9d27de62dc62..54ee5743cb72 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -231,20 +231,23 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
231 const u32 *imap, *tmp; 231 const u32 *imap, *tmp;
232 int imaplen, intsize, unit; 232 int imaplen, intsize, unit;
233 struct device_node *iic; 233 struct device_node *iic;
234 struct device_node *of_node;
235
236 of_node = irq_domain_get_of_node(pic->host);
234 237
235 /* First, we check whether we have a real "interrupts" in the device 238 /* First, we check whether we have a real "interrupts" in the device
236 * tree in case the device-tree is ever fixed 239 * tree in case the device-tree is ever fixed
237 */ 240 */
238 virq = irq_of_parse_and_map(pic->host->of_node, 0); 241 virq = irq_of_parse_and_map(of_node, 0);
239 if (virq) 242 if (virq)
240 return virq; 243 return virq;
241 244
242 /* Now do the horrible hacks */ 245 /* Now do the horrible hacks */
243 tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL); 246 tmp = of_get_property(of_node, "#interrupt-cells", NULL);
244 if (tmp == NULL) 247 if (tmp == NULL)
245 return NO_IRQ; 248 return NO_IRQ;
246 intsize = *tmp; 249 intsize = *tmp;
247 imap = of_get_property(pic->host->of_node, "interrupt-map", &imaplen); 250 imap = of_get_property(of_node, "interrupt-map", &imaplen);
248 if (imap == NULL || imaplen < (intsize + 1)) 251 if (imap == NULL || imaplen < (intsize + 1))
249 return NO_IRQ; 252 return NO_IRQ;
250 iic = of_find_node_by_phandle(imap[intsize]); 253 iic = of_find_node_by_phandle(imap[intsize]);
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index b304a9fe55cc..d9af76342d99 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -144,9 +144,11 @@ int mpic_pasemi_msi_init(struct mpic *mpic)
144{ 144{
145 int rc; 145 int rc;
146 struct pci_controller *phb; 146 struct pci_controller *phb;
147 struct device_node *of_node;
147 148
148 if (!mpic->irqhost->of_node || 149 of_node = irq_domain_get_of_node(mpic->irqhost);
149 !of_device_is_compatible(mpic->irqhost->of_node, 150 if (!of_node ||
151 !of_device_is_compatible(of_node,
150 "pasemi,pwrficient-openpic")) 152 "pasemi,pwrficient-openpic"))
151 return -ENODEV; 153 return -ENODEV;
152 154
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 2c91ee7800b9..6ccfb6c1c707 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -137,7 +137,7 @@ static void opal_handle_irq_work(struct irq_work *work)
137static int opal_event_match(struct irq_domain *h, struct device_node *node, 137static int opal_event_match(struct irq_domain *h, struct device_node *node,
138 enum irq_domain_bus_token bus_token) 138 enum irq_domain_bus_token bus_token)
139{ 139{
140 return h->of_node == node; 140 return irq_domain_get_of_node(h) == node;
141} 141}
142 142
143static int opal_event_xlate(struct irq_domain *h, struct device_node *np, 143static int opal_event_xlate(struct irq_domain *h, struct device_node *np,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 8f70ba681a78..ca264833ee64 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
171 * so clear LPCR:PECE1. We keep PECE2 enabled. 171 * so clear LPCR:PECE1. We keep PECE2 enabled.
172 */ 172 */
173 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); 173 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
174
175 /*
176 * Hard-disable interrupts, and then clear irq_happened flags
177 * that we can safely ignore while off-line, since they
178 * are for things for which we do no processing when off-line
179 * (or in the case of HMI, all the processing we need to do
180 * is done in lower-level real-mode code).
181 */
182 hard_irq_disable();
183 local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
184
174 while (!generic_check_cpu_restart(cpu)) { 185 while (!generic_check_cpu_restart(cpu)) {
186 /*
187 * Clear IPI flag, since we don't handle IPIs while
188 * offline, except for those when changing micro-threading
189 * mode, which are handled explicitly below, and those
190 * for coming online, which are handled via
191 * generic_check_cpu_restart() calls.
192 */
193 kvmppc_set_host_ipi(cpu, 0);
175 194
176 ppc64_runlatch_off(); 195 ppc64_runlatch_off();
177 196
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
196 * having finished executing in a KVM guest, then srr1 215 * having finished executing in a KVM guest, then srr1
197 * contains 0. 216 * contains 0.
198 */ 217 */
199 if ((srr1 & wmask) == SRR1_WAKEEE) { 218 if (((srr1 & wmask) == SRR1_WAKEEE) ||
219 (local_paca->irq_happened & PACA_IRQ_EE)) {
200 icp_native_flush_interrupt(); 220 icp_native_flush_interrupt();
201 local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
202 smp_mb();
203 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { 221 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
204 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); 222 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
205 asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); 223 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
206 kvmppc_set_host_ipi(cpu, 0);
207 } 224 }
225 local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
226 smp_mb();
208 227
209 if (cpu_core_split_required()) 228 if (cpu_core_split_required())
210 continue; 229 continue;
211 230
212 if (!generic_check_cpu_restart(cpu)) 231 if (srr1 && !generic_check_cpu_restart(cpu))
213 DBG("CPU%d Unexpected exit while offline !\n", cpu); 232 DBG("CPU%d Unexpected exit while offline !\n", cpu);
214 } 233 }
215 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); 234 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index eca0b00794fa..bffcc7a486a1 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -181,7 +181,8 @@ static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
181 enum irq_domain_bus_token bus_token) 181 enum irq_domain_bus_token bus_token)
182{ 182{
183 /* Exact match, unless ehv_pic node is NULL */ 183 /* Exact match, unless ehv_pic node is NULL */
184 return h->of_node == NULL || h->of_node == node; 184 struct device_node *of_node = irq_domain_get_of_node(h);
185 return of_node == NULL || of_node == node;
185} 186}
186 187
187static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq, 188static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 48a576aa47b9..3a2be3676f43 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -110,7 +110,7 @@ static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
110 int rc, hwirq; 110 int rc, hwirq;
111 111
112 rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX, 112 rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
113 msi_data->irqhost->of_node); 113 irq_domain_get_of_node(msi_data->irqhost));
114 if (rc) 114 if (rc)
115 return rc; 115 return rc;
116 116
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index e1a9c2c2d5d3..6f99ed3967fd 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -165,7 +165,8 @@ static struct resource pic_edgectrl_iores = {
165static int i8259_host_match(struct irq_domain *h, struct device_node *node, 165static int i8259_host_match(struct irq_domain *h, struct device_node *node,
166 enum irq_domain_bus_token bus_token) 166 enum irq_domain_bus_token bus_token)
167{ 167{
168 return h->of_node == NULL || h->of_node == node; 168 struct device_node *of_node = irq_domain_get_of_node(h);
169 return of_node == NULL || of_node == node;
169} 170}
170 171
171static int i8259_host_map(struct irq_domain *h, unsigned int virq, 172static int i8259_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index b1297ab1599b..f76ee39cb337 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -675,7 +675,8 @@ static int ipic_host_match(struct irq_domain *h, struct device_node *node,
675 enum irq_domain_bus_token bus_token) 675 enum irq_domain_bus_token bus_token)
676{ 676{
677 /* Exact match, unless ipic node is NULL */ 677 /* Exact match, unless ipic node is NULL */
678 return h->of_node == NULL || h->of_node == node; 678 struct device_node *of_node = irq_domain_get_of_node(h);
679 return of_node == NULL || of_node == node;
679} 680}
680 681
681static int ipic_host_map(struct irq_domain *h, unsigned int virq, 682static int ipic_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 537e5db85a06..cecd1156c185 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1011,7 +1011,8 @@ static int mpic_host_match(struct irq_domain *h, struct device_node *node,
1011 enum irq_domain_bus_token bus_token) 1011 enum irq_domain_bus_token bus_token)
1012{ 1012{
1013 /* Exact match, unless mpic node is NULL */ 1013 /* Exact match, unless mpic node is NULL */
1014 return h->of_node == NULL || h->of_node == node; 1014 struct device_node *of_node = irq_domain_get_of_node(h);
1015 return of_node == NULL || of_node == node;
1015} 1016}
1016 1017
1017static int mpic_host_map(struct irq_domain *h, unsigned int virq, 1018static int mpic_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 7dc39f35a4cc..1d48a5385905 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -84,7 +84,7 @@ int mpic_msi_init_allocator(struct mpic *mpic)
84 int rc; 84 int rc;
85 85
86 rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources, 86 rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources,
87 mpic->irqhost->of_node); 87 irq_domain_get_of_node(mpic->irqhost));
88 if (rc) 88 if (rc)
89 return rc; 89 return rc;
90 90
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index fbcc1f855a7f..ef36f16f9f6f 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -248,7 +248,8 @@ static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
248 enum irq_domain_bus_token bus_token) 248 enum irq_domain_bus_token bus_token)
249{ 249{
250 /* Exact match, unless qe_ic node is NULL */ 250 /* Exact match, unless qe_ic node is NULL */
251 return h->of_node == NULL || h->of_node == node; 251 struct device_node *of_node = irq_domain_get_of_node(h);
252 return of_node == NULL || of_node == node;
252} 253}
253 254
254static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, 255static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
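
The platform and sysdev conversions above all follow one pattern: callers stop dereferencing irq_domain->of_node directly and go through irq_domain_get_of_node(), so the field can later be reworked without touching every interrupt-controller driver. A simplified, compilable illustration of that accessor pattern, using stand-in struct layouts rather than the kernel's definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in types; the real ones live in <linux/of.h> and <linux/irqdomain.h>. */
    struct device_node { const char *full_name; };
    struct irq_domain  { struct device_node *of_node; };

    static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
    {
        return d ? d->of_node : NULL;
    }

    /* Same rule as the host_match hunks: exact match, unless the domain has no node. */
    static int host_match(struct irq_domain *h, struct device_node *node)
    {
        struct device_node *of_node = irq_domain_get_of_node(h);

        return of_node == NULL || of_node == node;
    }

    int main(void)
    {
        struct device_node n = { "/soc/interrupt-controller@0" };
        struct irq_domain d = { &n };

        printf("%d %d\n", host_match(&d, &n), host_match(&d, NULL));
        return 0;
    }
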
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index a9563409c36e..929c147e07b4 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -72,6 +72,7 @@ struct cpu_hw_events {
72 atomic_t ctr_set[CPUMF_CTR_SET_MAX]; 72 atomic_t ctr_set[CPUMF_CTR_SET_MAX];
73 u64 state, tx_state; 73 u64 state, tx_state;
74 unsigned int flags; 74 unsigned int flags;
75 unsigned int txn_flags;
75}; 76};
76static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { 77static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
77 .ctr_set = { 78 .ctr_set = {
@@ -82,6 +83,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
82 }, 83 },
83 .state = 0, 84 .state = 0,
84 .flags = 0, 85 .flags = 0,
86 .txn_flags = 0,
85}; 87};
86 88
87static int get_counter_set(u64 event) 89static int get_counter_set(u64 event)
@@ -538,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
538 * For group events transaction, the authorization check is 540 * For group events transaction, the authorization check is
539 * done in cpumf_pmu_commit_txn(). 541 * done in cpumf_pmu_commit_txn().
540 */ 542 */
541 if (!(cpuhw->flags & PERF_EVENT_TXN)) 543 if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
542 if (validate_ctr_auth(&event->hw)) 544 if (validate_ctr_auth(&event->hw))
543 return -ENOENT; 545 return -ENOENT;
544 546
@@ -576,13 +578,22 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
576/* 578/*
577 * Start group events scheduling transaction. 579 * Start group events scheduling transaction.
578 * Set flags to perform a single test at commit time. 580 * Set flags to perform a single test at commit time.
581 *
582 * We only support PERF_PMU_TXN_ADD transactions. Save the
583 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
584 * transactions.
579 */ 585 */
580static void cpumf_pmu_start_txn(struct pmu *pmu) 586static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
581{ 587{
582 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 588 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
583 589
590 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
591
592 cpuhw->txn_flags = txn_flags;
593 if (txn_flags & ~PERF_PMU_TXN_ADD)
594 return;
595
584 perf_pmu_disable(pmu); 596 perf_pmu_disable(pmu);
585 cpuhw->flags |= PERF_EVENT_TXN;
586 cpuhw->tx_state = cpuhw->state; 597 cpuhw->tx_state = cpuhw->state;
587} 598}
588 599
@@ -593,11 +604,18 @@ static void cpumf_pmu_start_txn(struct pmu *pmu)
593 */ 604 */
594static void cpumf_pmu_cancel_txn(struct pmu *pmu) 605static void cpumf_pmu_cancel_txn(struct pmu *pmu)
595{ 606{
607 unsigned int txn_flags;
596 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 608 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
597 609
610 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
611
612 txn_flags = cpuhw->txn_flags;
613 cpuhw->txn_flags = 0;
614 if (txn_flags & ~PERF_PMU_TXN_ADD)
615 return;
616
598 WARN_ON(cpuhw->tx_state != cpuhw->state); 617 WARN_ON(cpuhw->tx_state != cpuhw->state);
599 618
600 cpuhw->flags &= ~PERF_EVENT_TXN;
601 perf_pmu_enable(pmu); 619 perf_pmu_enable(pmu);
602} 620}
603 621
@@ -611,13 +629,20 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
611 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 629 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
612 u64 state; 630 u64 state;
613 631
632 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
633
634 if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
635 cpuhw->txn_flags = 0;
636 return 0;
637 }
638
614 /* check if the updated state can be scheduled */ 639 /* check if the updated state can be scheduled */
615 state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); 640 state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
616 state >>= CPUMF_LCCTL_ENABLE_SHIFT; 641 state >>= CPUMF_LCCTL_ENABLE_SHIFT;
617 if ((state & cpuhw->info.auth_ctl) != state) 642 if ((state & cpuhw->info.auth_ctl) != state)
618 return -ENOENT; 643 return -ENOENT;
619 644
620 cpuhw->flags &= ~PERF_EVENT_TXN; 645 cpuhw->txn_flags = 0;
621 perf_pmu_enable(pmu); 646 perf_pmu_enable(pmu);
622 return 0; 647 return 0;
623} 648}
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 05b9f74ce2d5..c399e1c55685 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -14,8 +14,8 @@
14 14
15#define ATOMIC_INIT(i) { (i) } 15#define ATOMIC_INIT(i) { (i) }
16 16
17#define atomic_read(v) ACCESS_ONCE((v)->counter) 17#define atomic_read(v) READ_ONCE((v)->counter)
18#define atomic_set(v,i) ((v)->counter = (i)) 18#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
19 19
20#if defined(CONFIG_GUSA_RB) 20#if defined(CONFIG_GUSA_RB)
21#include <asm/atomic-grb.h> 21#include <asm/atomic-grb.h>
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 917084ace49d..f2fbf9e16faf 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,11 +14,11 @@
14#define ATOMIC_INIT(i) { (i) } 14#define ATOMIC_INIT(i) { (i) }
15#define ATOMIC64_INIT(i) { (i) } 15#define ATOMIC64_INIT(i) { (i) }
16 16
17#define atomic_read(v) ACCESS_ONCE((v)->counter) 17#define atomic_read(v) READ_ONCE((v)->counter)
18#define atomic64_read(v) ACCESS_ONCE((v)->counter) 18#define atomic64_read(v) READ_ONCE((v)->counter)
19 19
20#define atomic_set(v, i) (((v)->counter) = i) 20#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
21#define atomic64_set(v, i) (((v)->counter) = i) 21#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
22 22
23#define ATOMIC_OP(op) \ 23#define ATOMIC_OP(op) \
24void atomic_##op(int, atomic_t *); \ 24void atomic_##op(int, atomic_t *); \
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 689db65f8529..b0da5aedb336 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -108,7 +108,7 @@ struct cpu_hw_events {
108 /* Enabled/disable state. */ 108 /* Enabled/disable state. */
109 int enabled; 109 int enabled;
110 110
111 unsigned int group_flag; 111 unsigned int txn_flags;
112}; 112};
113static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 113static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
114 114
@@ -1379,7 +1379,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
1379 * skip the schedulability test here, it will be performed 1379 * skip the schedulability test here, it will be performed
1380 * at commit time(->commit_txn) as a whole 1380 * at commit time(->commit_txn) as a whole
1381 */ 1381 */
1382 if (cpuc->group_flag & PERF_EVENT_TXN) 1382 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1383 goto nocheck; 1383 goto nocheck;
1384 1384
1385 if (check_excludes(cpuc->event, n0, 1)) 1385 if (check_excludes(cpuc->event, n0, 1))
@@ -1494,12 +1494,17 @@ static int sparc_pmu_event_init(struct perf_event *event)
1494 * Set the flag to make pmu::enable() not perform the 1494 * Set the flag to make pmu::enable() not perform the
1495 * schedulability test, it will be performed at commit time 1495 * schedulability test, it will be performed at commit time
1496 */ 1496 */
1497static void sparc_pmu_start_txn(struct pmu *pmu) 1497static void sparc_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
1498{ 1498{
1499 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 1499 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1500 1500
1501 WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
1502
1503 cpuhw->txn_flags = txn_flags;
1504 if (txn_flags & ~PERF_PMU_TXN_ADD)
1505 return;
1506
1501 perf_pmu_disable(pmu); 1507 perf_pmu_disable(pmu);
1502 cpuhw->group_flag |= PERF_EVENT_TXN;
1503} 1508}
1504 1509
1505/* 1510/*
@@ -1510,8 +1515,15 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
1510static void sparc_pmu_cancel_txn(struct pmu *pmu) 1515static void sparc_pmu_cancel_txn(struct pmu *pmu)
1511{ 1516{
1512 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 1517 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1518 unsigned int txn_flags;
1519
1520 WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
1521
1522 txn_flags = cpuhw->txn_flags;
1523 cpuhw->txn_flags = 0;
1524 if (txn_flags & ~PERF_PMU_TXN_ADD)
1525 return;
1513 1526
1514 cpuhw->group_flag &= ~PERF_EVENT_TXN;
1515 perf_pmu_enable(pmu); 1527 perf_pmu_enable(pmu);
1516} 1528}
1517 1529
@@ -1528,14 +1540,20 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
1528 if (!sparc_pmu) 1540 if (!sparc_pmu)
1529 return -EINVAL; 1541 return -EINVAL;
1530 1542
1531 cpuc = this_cpu_ptr(&cpu_hw_events); 1543 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1544
1545 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1546 cpuc->txn_flags = 0;
1547 return 0;
1548 }
1549
1532 n = cpuc->n_events; 1550 n = cpuc->n_events;
1533 if (check_excludes(cpuc->event, 0, n)) 1551 if (check_excludes(cpuc->event, 0, n))
1534 return -EINVAL; 1552 return -EINVAL;
1535 if (sparc_check_constraints(cpuc->event, cpuc->events, n)) 1553 if (sparc_check_constraints(cpuc->event, cpuc->events, n))
1536 return -EAGAIN; 1554 return -EAGAIN;
1537 1555
1538 cpuc->group_flag &= ~PERF_EVENT_TXN; 1556 cpuc->txn_flags = 0;
1539 perf_pmu_enable(pmu); 1557 perf_pmu_enable(pmu);
1540 return 0; 1558 return 0;
1541} 1559}
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 709798460763..9fc0107a9c5e 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -34,7 +34,7 @@
34 */ 34 */
35static inline int atomic_read(const atomic_t *v) 35static inline int atomic_read(const atomic_t *v)
36{ 36{
37 return ACCESS_ONCE(v->counter); 37 return READ_ONCE(v->counter);
38} 38}
39 39
40/** 40/**
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 096a56d6ead4..51cabc26e387 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -24,7 +24,7 @@
24 24
25/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */ 25/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
26 26
27#define atomic_set(v, i) ((v)->counter = (i)) 27#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
28 28
29/* 29/*
30 * The smp_mb() operations throughout are to support the fact that 30 * The smp_mb() operations throughout are to support the fact that
@@ -82,8 +82,8 @@ static inline void atomic_xor(int i, atomic_t *v)
82 82
83#define ATOMIC64_INIT(i) { (i) } 83#define ATOMIC64_INIT(i) { (i) }
84 84
85#define atomic64_read(v) ((v)->counter) 85#define atomic64_read(v) READ_ONCE((v)->counter)
86#define atomic64_set(v, i) ((v)->counter = (i)) 86#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
87 87
88static inline void atomic64_add(long i, atomic64_t *v) 88static inline void atomic64_add(long i, atomic64_t *v)
89{ 89{
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 098ab3333e7c..e3abe6f3156d 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -70,8 +70,8 @@ KBUILD_AFLAGS += $(ARCH_INCLUDE)
70 70
71USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \ 71USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
72 $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \ 72 $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \
73 -D_FILE_OFFSET_BITS=64 -idirafter include \ 73 -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
74 -D__KERNEL__ -D__UM_HOST__ 74 -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
75 75
76#This will adjust *FLAGS accordingly to the platform. 76#This will adjust *FLAGS accordingly to the platform.
77include $(ARCH_DIR)/Makefile-os-$(OS) 77include $(ARCH_DIR)/Makefile-os-$(OS)
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index d8a9fce6ee2e..98783dd0fa2e 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
220 show_regs(container_of(regs, struct pt_regs, regs)); 220 show_regs(container_of(regs, struct pt_regs, regs));
221 panic("Segfault with no mm"); 221 panic("Segfault with no mm");
222 } 222 }
223 else if (!is_user && address < TASK_SIZE) { 223 else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
224 show_regs(container_of(regs, struct pt_regs, regs)); 224 show_regs(container_of(regs, struct pt_regs, regs));
225 panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx", 225 panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
226 address, ip); 226 address, ip);
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index e3ee4a51ef63..3f02d4232812 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -96,7 +96,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
96 "ret = %d\n", -n); 96 "ret = %d\n", -n);
97 ret = n; 97 ret = n;
98 } 98 }
99 CATCH_EINTR(waitpid(pid, NULL, __WCLONE)); 99 CATCH_EINTR(waitpid(pid, NULL, __WALL));
100 } 100 }
101 101
102out_free2: 102out_free2:
@@ -129,7 +129,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
129 return err; 129 return err;
130 } 130 }
131 if (stack_out == NULL) { 131 if (stack_out == NULL) {
132 CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE)); 132 CATCH_EINTR(pid = waitpid(pid, &status, __WALL));
133 if (pid < 0) { 133 if (pid < 0) {
134 err = -errno; 134 err = -errno;
135 printk(UM_KERN_ERR "run_helper_thread - wait failed, " 135 printk(UM_KERN_ERR "run_helper_thread - wait failed, "
@@ -148,7 +148,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
148int helper_wait(int pid) 148int helper_wait(int pid)
149{ 149{
150 int ret, status; 150 int ret, status;
151 int wflags = __WCLONE; 151 int wflags = __WALL;
152 152
153 CATCH_EINTR(ret = waitpid(pid, &status, wflags)); 153 CATCH_EINTR(ret = waitpid(pid, &status, wflags));
154 if (ret < 0) { 154 if (ret < 0) {
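
The helper.c hunks above switch the waitpid() flags from __WCLONE to __WALL, so one wait call reaps helper children regardless of how they were created; a __WCLONE-only wait misses ordinary fork()ed children. A small host-side example of waiting with __WALL (plain fork() here for brevity):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #ifndef __WALL
    #define __WALL 0x40000000     /* Linux value, in case the libc headers hide it */
    #endif

    int main(void)
    {
        pid_t pid = fork();

        if (pid < 0)
            return 1;
        if (pid == 0)
            _exit(7);             /* stands in for the UML helper child */

        int status;
        if (waitpid(pid, &status, __WALL) == pid && WIFEXITED(status))
            printf("helper exited with %d\n", WEXITSTATUS(status));
        return 0;
    }
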
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 96d058a87100..db3622f22b61 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1123,8 +1123,10 @@ config X86_REBOOTFIXUPS
1123 Say N otherwise. 1123 Say N otherwise.
1124 1124
1125config MICROCODE 1125config MICROCODE
1126 tristate "CPU microcode loading support" 1126 bool "CPU microcode loading support"
1127 default y
1127 depends on CPU_SUP_AMD || CPU_SUP_INTEL 1128 depends on CPU_SUP_AMD || CPU_SUP_INTEL
1129 depends on BLK_DEV_INITRD
1128 select FW_LOADER 1130 select FW_LOADER
1129 ---help--- 1131 ---help---
1130 1132
@@ -1166,24 +1168,6 @@ config MICROCODE_OLD_INTERFACE
1166 def_bool y 1168 def_bool y
1167 depends on MICROCODE 1169 depends on MICROCODE
1168 1170
1169config MICROCODE_INTEL_EARLY
1170 bool
1171
1172config MICROCODE_AMD_EARLY
1173 bool
1174
1175config MICROCODE_EARLY
1176 bool "Early load microcode"
1177 depends on MICROCODE=y && BLK_DEV_INITRD
1178 select MICROCODE_INTEL_EARLY if MICROCODE_INTEL
1179 select MICROCODE_AMD_EARLY if MICROCODE_AMD
1180 default y
1181 help
1182 This option provides functionality to read additional microcode data
1183 at the beginning of initrd image. The data tells kernel to load
1184 microcode to CPU's as early as possible. No functional change if no
1185 microcode data is glued to the initrd, therefore it's safe to say Y.
1186
1187config X86_MSR 1171config X86_MSR
1188 tristate "/dev/cpu/*/msr - Model-specific register support" 1172 tristate "/dev/cpu/*/msr - Model-specific register support"
1189 ---help--- 1173 ---help---
@@ -2043,6 +2027,55 @@ config COMPAT_VDSO
2043 If unsure, say N: if you are compiling your own kernel, you 2027 If unsure, say N: if you are compiling your own kernel, you
2044 are unlikely to be using a buggy version of glibc. 2028 are unlikely to be using a buggy version of glibc.
2045 2029
2030choice
2031 prompt "vsyscall table for legacy applications"
2032 depends on X86_64
2033 default LEGACY_VSYSCALL_EMULATE
2034 help
2035 Legacy user code that does not know how to find the vDSO expects
2036 to be able to issue three syscalls by calling fixed addresses in
2037 kernel space. Since this location is not randomized with ASLR,
2038 it can be used to assist security vulnerability exploitation.
2039
2040 This setting can be changed at boot time via the kernel command
2041 line parameter vsyscall=[native|emulate|none].
2042
2043 On a system with recent enough glibc (2.14 or newer) and no
2044 static binaries, you can say None without a performance penalty
2045 to improve security.
2046
2047 If unsure, select "Emulate".
2048
2049 config LEGACY_VSYSCALL_NATIVE
2050 bool "Native"
2051 help
2052 Actual executable code is located in the fixed vsyscall
2053 address mapping, implementing time() efficiently. Since
2054 this makes the mapping executable, it can be used during
2055 security vulnerability exploitation (traditionally as
2056 ROP gadgets). This configuration is not recommended.
2057
2058 config LEGACY_VSYSCALL_EMULATE
2059 bool "Emulate"
2060 help
2061 The kernel traps and emulates calls into the fixed
2062 vsyscall address mapping. This makes the mapping
2063 non-executable, but it still contains known contents,
2064 which could be used in certain rare security vulnerability
2065 exploits. This configuration is recommended when userspace
2066 still uses the vsyscall area.
2067
2068 config LEGACY_VSYSCALL_NONE
2069 bool "None"
2070 help
2071 There will be no vsyscall mapping at all. This will
2072 eliminate any risk of ASLR bypass due to the vsyscall
2073 fixed address mapping. Attempts to use the vsyscalls
2074 will be reported to dmesg, so that either old or
2075 malicious userspace programs can be identified.
2076
2077endchoice
2078
2046config CMDLINE_BOOL 2079config CMDLINE_BOOL
2047 bool "Built-in kernel command line" 2080 bool "Built-in kernel command line"
2048 ---help--- 2081 ---help---
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 747860c696e1..2dfaa72260b4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -159,6 +159,12 @@ endif
159sp-$(CONFIG_X86_32) := esp 159sp-$(CONFIG_X86_32) := esp
160sp-$(CONFIG_X86_64) := rsp 160sp-$(CONFIG_X86_64) := rsp
161 161
162# do binutils support CFI?
163cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
164# is .cfi_signal_frame supported too?
165cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
166cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
167
162# does binutils support specific instructions? 168# does binutils support specific instructions?
163asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) 169asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
164asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) 170asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -166,8 +172,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
166avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) 172avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
167avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) 173avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
168 174
169KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) 175KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
170KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) 176KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
171 177
172LDFLAGS := -m elf_$(UTS_MACHINE) 178LDFLAGS := -m elf_$(UTS_MACHINE)
173 179
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ee1b6d346b98..583d539a4197 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -624,7 +624,7 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
624static efi_status_t 624static efi_status_t
625__gop_query32(struct efi_graphics_output_protocol_32 *gop32, 625__gop_query32(struct efi_graphics_output_protocol_32 *gop32,
626 struct efi_graphics_output_mode_info **info, 626 struct efi_graphics_output_mode_info **info,
627 unsigned long *size, u32 *fb_base) 627 unsigned long *size, u64 *fb_base)
628{ 628{
629 struct efi_graphics_output_protocol_mode_32 *mode; 629 struct efi_graphics_output_protocol_mode_32 *mode;
630 efi_status_t status; 630 efi_status_t status;
@@ -650,7 +650,8 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
650 unsigned long nr_gops; 650 unsigned long nr_gops;
651 u16 width, height; 651 u16 width, height;
652 u32 pixels_per_scan_line; 652 u32 pixels_per_scan_line;
653 u32 fb_base; 653 u32 ext_lfb_base;
654 u64 fb_base;
654 struct efi_pixel_bitmask pixel_info; 655 struct efi_pixel_bitmask pixel_info;
655 int pixel_format; 656 int pixel_format;
656 efi_status_t status; 657 efi_status_t status;
@@ -667,6 +668,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
667 bool conout_found = false; 668 bool conout_found = false;
668 void *dummy = NULL; 669 void *dummy = NULL;
669 u32 h = handles[i]; 670 u32 h = handles[i];
671 u64 current_fb_base;
670 672
671 status = efi_call_early(handle_protocol, h, 673 status = efi_call_early(handle_protocol, h,
672 proto, (void **)&gop32); 674 proto, (void **)&gop32);
@@ -678,7 +680,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
678 if (status == EFI_SUCCESS) 680 if (status == EFI_SUCCESS)
679 conout_found = true; 681 conout_found = true;
680 682
681 status = __gop_query32(gop32, &info, &size, &fb_base); 683 status = __gop_query32(gop32, &info, &size, &current_fb_base);
682 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 684 if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
683 /* 685 /*
684 * Systems that use the UEFI Console Splitter may 686 * Systems that use the UEFI Console Splitter may
@@ -692,6 +694,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
692 pixel_format = info->pixel_format; 694 pixel_format = info->pixel_format;
693 pixel_info = info->pixel_information; 695 pixel_info = info->pixel_information;
694 pixels_per_scan_line = info->pixels_per_scan_line; 696 pixels_per_scan_line = info->pixels_per_scan_line;
697 fb_base = current_fb_base;
695 698
696 /* 699 /*
697 * Once we've found a GOP supporting ConOut, 700 * Once we've found a GOP supporting ConOut,
@@ -713,6 +716,13 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
713 si->lfb_width = width; 716 si->lfb_width = width;
714 si->lfb_height = height; 717 si->lfb_height = height;
715 si->lfb_base = fb_base; 718 si->lfb_base = fb_base;
719
720 ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
721 if (ext_lfb_base) {
722 si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
723 si->ext_lfb_base = ext_lfb_base;
724 }
725
716 si->pages = 1; 726 si->pages = 1;
717 727
718 setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format); 728 setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
@@ -727,7 +737,7 @@ out:
727static efi_status_t 737static efi_status_t
728__gop_query64(struct efi_graphics_output_protocol_64 *gop64, 738__gop_query64(struct efi_graphics_output_protocol_64 *gop64,
729 struct efi_graphics_output_mode_info **info, 739 struct efi_graphics_output_mode_info **info,
730 unsigned long *size, u32 *fb_base) 740 unsigned long *size, u64 *fb_base)
731{ 741{
732 struct efi_graphics_output_protocol_mode_64 *mode; 742 struct efi_graphics_output_protocol_mode_64 *mode;
733 efi_status_t status; 743 efi_status_t status;
@@ -753,7 +763,8 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
753 unsigned long nr_gops; 763 unsigned long nr_gops;
754 u16 width, height; 764 u16 width, height;
755 u32 pixels_per_scan_line; 765 u32 pixels_per_scan_line;
756 u32 fb_base; 766 u32 ext_lfb_base;
767 u64 fb_base;
757 struct efi_pixel_bitmask pixel_info; 768 struct efi_pixel_bitmask pixel_info;
758 int pixel_format; 769 int pixel_format;
759 efi_status_t status; 770 efi_status_t status;
@@ -770,6 +781,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
770 bool conout_found = false; 781 bool conout_found = false;
771 void *dummy = NULL; 782 void *dummy = NULL;
772 u64 h = handles[i]; 783 u64 h = handles[i];
784 u64 current_fb_base;
773 785
774 status = efi_call_early(handle_protocol, h, 786 status = efi_call_early(handle_protocol, h,
775 proto, (void **)&gop64); 787 proto, (void **)&gop64);
@@ -781,7 +793,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
781 if (status == EFI_SUCCESS) 793 if (status == EFI_SUCCESS)
782 conout_found = true; 794 conout_found = true;
783 795
784 status = __gop_query64(gop64, &info, &size, &fb_base); 796 status = __gop_query64(gop64, &info, &size, &current_fb_base);
785 if (status == EFI_SUCCESS && (!first_gop || conout_found)) { 797 if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
786 /* 798 /*
787 * Systems that use the UEFI Console Splitter may 799 * Systems that use the UEFI Console Splitter may
@@ -795,6 +807,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
795 pixel_format = info->pixel_format; 807 pixel_format = info->pixel_format;
796 pixel_info = info->pixel_information; 808 pixel_info = info->pixel_information;
797 pixels_per_scan_line = info->pixels_per_scan_line; 809 pixels_per_scan_line = info->pixels_per_scan_line;
810 fb_base = current_fb_base;
798 811
799 /* 812 /*
800 * Once we've found a GOP supporting ConOut, 813 * Once we've found a GOP supporting ConOut,
@@ -816,6 +829,13 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
816 si->lfb_width = width; 829 si->lfb_width = width;
817 si->lfb_height = height; 830 si->lfb_height = height;
818 si->lfb_base = fb_base; 831 si->lfb_base = fb_base;
832
833 ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
834 if (ext_lfb_base) {
835 si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
836 si->ext_lfb_base = ext_lfb_base;
837 }
838
819 si->pages = 1; 839 si->pages = 1;
820 840
821 setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format); 841 setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
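The eboot.c hunks above widen the GOP framebuffer base to a u64 and, when the base sits above 4 GiB, store the upper 32 bits in screen_info.ext_lfb_base and advertise VIDEO_CAPABILITY_64BIT_BASE. A minimal stand-alone C sketch of that split, assuming a simplified screen_info stand-in (the flag value 0x0002 is taken on the assumption it matches the uapi header):

#include <stdio.h>

#define VIDEO_CAPABILITY_64BIT_BASE 0x0002    /* value assumed for the sketch */

struct fake_screen_info {                     /* stand-in, not the kernel struct */
    unsigned int   lfb_base;                  /* low 32 bits of the framebuffer base */
    unsigned int   ext_lfb_base;              /* high 32 bits, valid only with the flag */
    unsigned short capabilities;
};

static void set_fb_base(struct fake_screen_info *si, unsigned long long fb_base)
{
    unsigned int ext = fb_base >> 32;

    si->lfb_base = (unsigned int)fb_base;     /* truncated low half, as before the patch */
    if (ext) {
        si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
        si->ext_lfb_base = ext;               /* consumers reassemble the full 64-bit base */
    }
}

int main(void)
{
    struct fake_screen_info si = { 0 };

    set_fb_base(&si, 0x1E0000000ULL);         /* a GOP framebuffer just above 4 GiB */
    printf("lfb_base=%#x ext_lfb_base=%#x capabilities=%#x\n",
           si.lfb_base, si.ext_lfb_base, (unsigned)si.capabilities);
    return 0;
}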
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 2d6b309c8e9a..6236b9ec4b76 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -154,7 +154,7 @@ extra_header_fields:
154#else 154#else
155 .quad 0 # ImageBase 155 .quad 0 # ImageBase
156#endif 156#endif
157 .long CONFIG_PHYSICAL_ALIGN # SectionAlignment 157 .long 0x20 # SectionAlignment
158 .long 0x20 # FileAlignment 158 .long 0x20 # FileAlignment
159 .word 0 # MajorOperatingSystemVersion 159 .word 0 # MajorOperatingSystemVersion
160 .word 0 # MinorOperatingSystemVersion 160 .word 0 # MinorOperatingSystemVersion
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 80dcc9261ca3..a89fdbc1f0be 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -24,10 +24,19 @@
24 24
25#include <asm/desc.h> 25#include <asm/desc.h>
26#include <asm/traps.h> 26#include <asm/traps.h>
27#include <asm/vdso.h>
28#include <asm/uaccess.h>
27 29
28#define CREATE_TRACE_POINTS 30#define CREATE_TRACE_POINTS
29#include <trace/events/syscalls.h> 31#include <trace/events/syscalls.h>
30 32
33static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
34{
35 unsigned long top_of_stack =
36 (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
37 return (struct thread_info *)(top_of_stack - THREAD_SIZE);
38}
39
31#ifdef CONFIG_CONTEXT_TRACKING 40#ifdef CONFIG_CONTEXT_TRACKING
32/* Called on entry from user mode with IRQs off. */ 41/* Called on entry from user mode with IRQs off. */
33__visible void enter_from_user_mode(void) 42__visible void enter_from_user_mode(void)
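The helper moved to the top of common.c above recovers thread_info from a pt_regs pointer purely by stack arithmetic: pt_regs sits just under the top of the kernel stack (minus TOP_OF_KERNEL_STACK_PADDING) and thread_info sits at the bottom, THREAD_SIZE bytes below. A stand-alone sketch of that arithmetic with made-up sizes and stand-in structs:

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE                 (16 * 1024)  /* assumed; the real value is per-arch */
#define TOP_OF_KERNEL_STACK_PADDING 0            /* 0 on x86_64; 8 or 16 on 32-bit */

struct fake_pt_regs     { unsigned long r[21]; };
struct fake_thread_info { unsigned long flags; };

static struct fake_thread_info *regs_to_thread_info(struct fake_pt_regs *regs)
{
    unsigned long top_of_stack =
        (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;

    return (struct fake_thread_info *)(top_of_stack - THREAD_SIZE);
}

int main(void)
{
    /* Fake "kernel stack": thread_info at the bottom, pt_regs at the very top. */
    char *stack = malloc(THREAD_SIZE);
    struct fake_thread_info *ti = (struct fake_thread_info *)stack;
    struct fake_pt_regs *regs = (struct fake_pt_regs *)
        (stack + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING - sizeof(*regs));

    printf("recovered thread_info matches: %d\n", regs_to_thread_info(regs) == ti);
    free(stack);
    return 0;
}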
@@ -66,13 +75,14 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
66 */ 75 */
67unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) 76unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
68{ 77{
78 struct thread_info *ti = pt_regs_to_thread_info(regs);
69 unsigned long ret = 0; 79 unsigned long ret = 0;
70 u32 work; 80 u32 work;
71 81
72 BUG_ON(regs != task_pt_regs(current)); 82 if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
83 BUG_ON(regs != task_pt_regs(current));
73 84
74 work = ACCESS_ONCE(current_thread_info()->flags) & 85 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
75 _TIF_WORK_SYSCALL_ENTRY;
76 86
77#ifdef CONFIG_CONTEXT_TRACKING 87#ifdef CONFIG_CONTEXT_TRACKING
78 /* 88 /*
@@ -154,11 +164,12 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
154long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch, 164long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
155 unsigned long phase1_result) 165 unsigned long phase1_result)
156{ 166{
167 struct thread_info *ti = pt_regs_to_thread_info(regs);
157 long ret = 0; 168 long ret = 0;
158 u32 work = ACCESS_ONCE(current_thread_info()->flags) & 169 u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
159 _TIF_WORK_SYSCALL_ENTRY;
160 170
161 BUG_ON(regs != task_pt_regs(current)); 171 if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
172 BUG_ON(regs != task_pt_regs(current));
162 173
163 /* 174 /*
164 * If we stepped into a sysenter/syscall insn, it trapped in 175 * If we stepped into a sysenter/syscall insn, it trapped in
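The BUG_ON() sanity checks above are now wrapped in IS_ENABLED(CONFIG_DEBUG_ENTRY), so they cost nothing in production builds yet the check still gets parsed and type-checked. A reduced sketch of that gating pattern, with the config option emulated by a plain macro and a stand-in regs helper:

#include <assert.h>
#include <stdio.h>

#define DEBUG_ENTRY 0                /* stand-in for IS_ENABLED(CONFIG_DEBUG_ENTRY) */

struct fake_regs { int dummy; };

static struct fake_regs *fake_task_pt_regs(void)
{
    static struct fake_regs r;
    return &r;
}

static void trace_enter(struct fake_regs *regs)
{
    if (DEBUG_ENTRY)                 /* constant-folded away when the option is off */
        assert(regs == fake_task_pt_regs());

    puts("syscall-entry tracing work would run here");
}

int main(void)
{
    trace_enter(fake_task_pt_regs());
    return 0;
}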
@@ -207,19 +218,12 @@ long syscall_trace_enter(struct pt_regs *regs)
207 return syscall_trace_enter_phase2(regs, arch, phase1_result); 218 return syscall_trace_enter_phase2(regs, arch, phase1_result);
208} 219}
209 220
210static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs) 221#define EXIT_TO_USERMODE_LOOP_FLAGS \
211{ 222 (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
212 unsigned long top_of_stack = 223 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
213 (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
214 return (struct thread_info *)(top_of_stack - THREAD_SIZE);
215}
216 224
217/* Called with IRQs disabled. */ 225static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
218__visible void prepare_exit_to_usermode(struct pt_regs *regs)
219{ 226{
220 if (WARN_ON(!irqs_disabled()))
221 local_irq_disable();
222
223 /* 227 /*
224 * In order to return to user mode, we need to have IRQs off with 228 * In order to return to user mode, we need to have IRQs off with
225 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY, 229 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
@@ -229,14 +233,6 @@ __visible void prepare_exit_to_usermode(struct pt_regs *regs)
229 * work to clear some of the flags can sleep. 233 * work to clear some of the flags can sleep.
230 */ 234 */
231 while (true) { 235 while (true) {
232 u32 cached_flags =
233 READ_ONCE(pt_regs_to_thread_info(regs)->flags);
234
235 if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |
236 _TIF_UPROBE | _TIF_NEED_RESCHED |
237 _TIF_USER_RETURN_NOTIFY)))
238 break;
239
240 /* We have work to do. */ 236 /* We have work to do. */
241 local_irq_enable(); 237 local_irq_enable();
242 238
@@ -260,50 +256,81 @@ __visible void prepare_exit_to_usermode(struct pt_regs *regs)
260 256
261 /* Disable IRQs and retry */ 257 /* Disable IRQs and retry */
262 local_irq_disable(); 258 local_irq_disable();
259
260 cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);
261
262 if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
263 break;
264
263 } 265 }
266}
267
268/* Called with IRQs disabled. */
269__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
270{
271 u32 cached_flags;
272
273 if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
274 local_irq_disable();
275
276 lockdep_sys_exit();
277
278 cached_flags =
279 READ_ONCE(pt_regs_to_thread_info(regs)->flags);
280
281 if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
282 exit_to_usermode_loop(regs, cached_flags);
264 283
265 user_enter(); 284 user_enter();
266} 285}
267 286
287#define SYSCALL_EXIT_WORK_FLAGS \
288 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
289 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
290
291static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
292{
293 bool step;
294
295 audit_syscall_exit(regs);
296
297 if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
298 trace_sys_exit(regs, regs->ax);
299
300 /*
301 * If TIF_SYSCALL_EMU is set, we only get here because of
302 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
303 * We already reported this syscall instruction in
304 * syscall_trace_enter().
305 */
306 step = unlikely(
307 (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
308 == _TIF_SINGLESTEP);
309 if (step || cached_flags & _TIF_SYSCALL_TRACE)
310 tracehook_report_syscall_exit(regs, step);
311}
312
268/* 313/*
269 * Called with IRQs on and fully valid regs. Returns with IRQs off in a 314 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
270 * state such that we can immediately switch to user mode. 315 * state such that we can immediately switch to user mode.
271 */ 316 */
272__visible void syscall_return_slowpath(struct pt_regs *regs) 317__visible inline void syscall_return_slowpath(struct pt_regs *regs)
273{ 318{
274 struct thread_info *ti = pt_regs_to_thread_info(regs); 319 struct thread_info *ti = pt_regs_to_thread_info(regs);
275 u32 cached_flags = READ_ONCE(ti->flags); 320 u32 cached_flags = READ_ONCE(ti->flags);
276 bool step;
277 321
278 CT_WARN_ON(ct_state() != CONTEXT_KERNEL); 322 CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
279 323
280 if (WARN(irqs_disabled(), "syscall %ld left IRQs disabled", 324 if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
281 regs->orig_ax)) 325 WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
282 local_irq_enable(); 326 local_irq_enable();
283 327
284 /* 328 /*
285 * First do one-time work. If these work items are enabled, we 329 * First do one-time work. If these work items are enabled, we
286 * want to run them exactly once per syscall exit with IRQs on. 330 * want to run them exactly once per syscall exit with IRQs on.
287 */ 331 */
288 if (cached_flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | 332 if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
289 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)) { 333 syscall_slow_exit_work(regs, cached_flags);
290 audit_syscall_exit(regs);
291
292 if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
293 trace_sys_exit(regs, regs->ax);
294
295 /*
296 * If TIF_SYSCALL_EMU is set, we only get here because of
297 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
298 * We already reported this syscall instruction in
299 * syscall_trace_enter().
300 */
301 step = unlikely(
302 (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
303 == _TIF_SINGLESTEP);
304 if (step || cached_flags & _TIF_SYSCALL_TRACE)
305 tracehook_report_syscall_exit(regs, step);
306 }
307 334
308#ifdef CONFIG_COMPAT 335#ifdef CONFIG_COMPAT
309 /* 336 /*
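prepare_exit_to_usermode() above now reads the thread flags once with IRQs off and only drops into exit_to_usermode_loop() when one of EXIT_TO_USERMODE_LOOP_FLAGS is set; the loop enables IRQs, handles the pending work, disables IRQs again and re-checks before returning. A reduced stand-alone sketch of that shape (flag bits, IRQ toggles and work handlers are stand-ins):

#include <stdio.h>

#define TIF_SIGPENDING   (1u << 0)   /* bit positions are stand-ins */
#define TIF_NEED_RESCHED (1u << 1)
#define LOOP_FLAGS       (TIF_SIGPENDING | TIF_NEED_RESCHED)

static unsigned int thread_flags = TIF_SIGPENDING | TIF_NEED_RESCHED;

static void irq_enable(void)  { puts("irqs on"); }
static void irq_disable(void) { puts("irqs off"); }

static void exit_to_usermode_loop(unsigned int cached_flags)
{
    while (1) {
        /* The work below may sleep or raise new flags, so IRQs go back on. */
        irq_enable();

        if (cached_flags & TIF_NEED_RESCHED) {
            puts("schedule()");
            thread_flags &= ~TIF_NEED_RESCHED;
        }
        if (cached_flags & TIF_SIGPENDING) {
            puts("do_signal()");
            thread_flags &= ~TIF_SIGPENDING;
        }

        /* Re-check with IRQs off; leave only once no loop work remains. */
        irq_disable();
        cached_flags = thread_flags;
        if (!(cached_flags & LOOP_FLAGS))
            break;
    }
}

int main(void)
{
    irq_disable();
    if (thread_flags & LOOP_FLAGS)   /* the common case skips the loop entirely */
        exit_to_usermode_loop(thread_flags);
    /* user_enter() and the return to user mode would follow here */
    return 0;
}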
@@ -316,3 +343,144 @@ __visible void syscall_return_slowpath(struct pt_regs *regs)
316 local_irq_disable(); 343 local_irq_disable();
317 prepare_exit_to_usermode(regs); 344 prepare_exit_to_usermode(regs);
318} 345}
346
347#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
348/*
349 * Does a 32-bit syscall. Called with IRQs on and does all entry and
350 * exit work and returns with IRQs off. This function is extremely hot
351 * in workloads that use it, and it's usually called from
352 * do_fast_syscall_32, so forcibly inline it to improve performance.
353 */
354#ifdef CONFIG_X86_32
355/* 32-bit kernels use a trap gate for INT80, and the asm code calls here. */
356__visible
357#else
358/* 64-bit kernels use do_syscall_32_irqs_off() instead. */
359static
360#endif
361__always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
362{
363 struct thread_info *ti = pt_regs_to_thread_info(regs);
364 unsigned int nr = (unsigned int)regs->orig_ax;
365
366#ifdef CONFIG_IA32_EMULATION
367 ti->status |= TS_COMPAT;
368#endif
369
370 if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
371 /*
372 * Subtlety here: if ptrace pokes something larger than
373 * 2^32-1 into orig_ax, this truncates it. This may or
374 * may not be necessary, but it matches the old asm
375 * behavior.
376 */
377 nr = syscall_trace_enter(regs);
378 }
379
380 if (likely(nr < IA32_NR_syscalls)) {
381 /*
382 * It's possible that a 32-bit syscall implementation
383 * takes a 64-bit parameter but nonetheless assumes that
384 * the high bits are zero. Make sure we zero-extend all
385 * of the args.
386 */
387 regs->ax = ia32_sys_call_table[nr](
388 (unsigned int)regs->bx, (unsigned int)regs->cx,
389 (unsigned int)regs->dx, (unsigned int)regs->si,
390 (unsigned int)regs->di, (unsigned int)regs->bp);
391 }
392
393 syscall_return_slowpath(regs);
394}
395
396#ifdef CONFIG_X86_64
397/* Handles INT80 on 64-bit kernels */
398__visible void do_syscall_32_irqs_off(struct pt_regs *regs)
399{
400 local_irq_enable();
401 do_syscall_32_irqs_on(regs);
402}
403#endif
404
405/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
406__visible long do_fast_syscall_32(struct pt_regs *regs)
407{
408 /*
409 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
410 * convention. Adjust regs so it looks like we entered using int80.
411 */
412
413 unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
414 vdso_image_32.sym_int80_landing_pad;
415
416 /*
417 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
418 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
419 * Fix it up.
420 */
421 regs->ip = landing_pad;
422
423 /*
424 * Fetch ECX from where the vDSO stashed it.
425 *
426 * WARNING: We are in CONTEXT_USER and RCU isn't paying attention!
427 */
428 local_irq_enable();
429 if (
430#ifdef CONFIG_X86_64
431 /*
432 * Micro-optimization: the pointer we're following is explicitly
433 * 32 bits, so it can't be out of range.
434 */
435 __get_user(*(u32 *)&regs->cx,
436 (u32 __user __force *)(unsigned long)(u32)regs->sp)
437#else
438 get_user(*(u32 *)&regs->cx,
439 (u32 __user __force *)(unsigned long)(u32)regs->sp)
440#endif
441 ) {
442
443 /* User code screwed up. */
444 local_irq_disable();
445 regs->ax = -EFAULT;
446#ifdef CONFIG_CONTEXT_TRACKING
447 enter_from_user_mode();
448#endif
449 prepare_exit_to_usermode(regs);
450 return 0; /* Keep it simple: use IRET. */
451 }
452
453 /* Now this is just like a normal syscall. */
454 do_syscall_32_irqs_on(regs);
455
456#ifdef CONFIG_X86_64
457 /*
458 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
459 * SYSRETL is available on all 64-bit CPUs, so we don't need to
460 * bother with SYSEXIT.
461 *
462 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
463 * because the ECX fixup above will ensure that this is essentially
464 * never the case.
465 */
466 return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
467 regs->ip == landing_pad &&
468 (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
469#else
470 /*
471 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
472 *
473 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
474 * because the ECX fixup above will ensure that this is essentially
475 * never the case.
476 *
477 * We don't allow syscalls at all from VM86 mode, but we still
478 * need to check VM, because we might be returning from sys_vm86.
479 */
480 return static_cpu_has(X86_FEATURE_SEP) &&
481 regs->cs == __USER_CS && regs->ss == __USER_DS &&
482 regs->ip == landing_pad &&
483 (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
484#endif
485}
486#endif
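do_fast_syscall_32() above ends by deciding whether the assembly caller may take the fast SYSEXIT/SYSRETL exit or must fall back to IRET: the saved cs/ss must still be the user selectors, ip must equal the vDSO landing pad, and TF/RF must be clear. A stand-alone sketch of that predicate, with selector values assumed purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define USER_CS   0x23               /* stand-ins for __USER_CS / __USER_DS */
#define USER_DS   0x2b
#define EFLAGS_TF (1ul << 8)
#define EFLAGS_RF (1ul << 16)

struct fake_regs { unsigned long cs, ss, ip, flags; };

/* true: the asm caller may use SYSEXIT/SYSRETL; false: fall back to IRET */
static bool can_fast_return(const struct fake_regs *r, unsigned long landing_pad)
{
    return r->cs == USER_CS && r->ss == USER_DS &&
           r->ip == landing_pad &&
           (r->flags & (EFLAGS_RF | EFLAGS_TF)) == 0;
}

int main(void)
{
    struct fake_regs r = { .cs = USER_CS, .ss = USER_DS,
                           .ip = 0x1000, .flags = 0x202 };

    printf("%d\n", can_fast_return(&r, 0x1000));   /* 1: fast path allowed */
    r.flags |= EFLAGS_TF;                          /* e.g. ptrace single-stepping */
    printf("%d\n", can_fast_return(&r, 0x1000));   /* 0: must IRET */
    return 0;
}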
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index b2909bf8cf70..3eb572ed3d7a 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -3,7 +3,7 @@
3 * 3 *
4 * entry_32.S contains the system-call and low-level fault and trap handling routines. 4 * entry_32.S contains the system-call and low-level fault and trap handling routines.
5 * 5 *
6 * Stack layout in 'syscall_exit': 6 * Stack layout while running C code:
7 * ptrace needs to have all registers on the stack. 7 * ptrace needs to have all registers on the stack.
8 * If the order here is changed, it needs to be 8 * If the order here is changed, it needs to be
9 * updated in fork.c:copy_process(), signal.c:do_signal(), 9 * updated in fork.c:copy_process(), signal.c:do_signal(),
@@ -153,13 +153,13 @@
153 153
154#endif /* CONFIG_X86_32_LAZY_GS */ 154#endif /* CONFIG_X86_32_LAZY_GS */
155 155
156.macro SAVE_ALL 156.macro SAVE_ALL pt_regs_ax=%eax
157 cld 157 cld
158 PUSH_GS 158 PUSH_GS
159 pushl %fs 159 pushl %fs
160 pushl %es 160 pushl %es
161 pushl %ds 161 pushl %ds
162 pushl %eax 162 pushl \pt_regs_ax
163 pushl %ebp 163 pushl %ebp
164 pushl %edi 164 pushl %edi
165 pushl %esi 165 pushl %esi
@@ -211,7 +211,11 @@ ENTRY(ret_from_fork)
211 popl %eax 211 popl %eax
212 pushl $0x0202 # Reset kernel eflags 212 pushl $0x0202 # Reset kernel eflags
213 popfl 213 popfl
214 jmp syscall_exit 214
215 /* When we fork, we trace the syscall return in the child, too. */
216 movl %esp, %eax
217 call syscall_return_slowpath
218 jmp restore_all
215END(ret_from_fork) 219END(ret_from_fork)
216 220
217ENTRY(ret_from_kernel_thread) 221ENTRY(ret_from_kernel_thread)
@@ -224,7 +228,15 @@ ENTRY(ret_from_kernel_thread)
224 movl PT_EBP(%esp), %eax 228 movl PT_EBP(%esp), %eax
225 call *PT_EBX(%esp) 229 call *PT_EBX(%esp)
226 movl $0, PT_EAX(%esp) 230 movl $0, PT_EAX(%esp)
227 jmp syscall_exit 231
232 /*
233 * Kernel threads return to userspace as if returning from a syscall.
234 * We should check whether anything actually uses this path and, if so,
235 * consider switching it over to ret_from_fork.
236 */
237 movl %esp, %eax
238 call syscall_return_slowpath
239 jmp restore_all
228ENDPROC(ret_from_kernel_thread) 240ENDPROC(ret_from_kernel_thread)
229 241
230/* 242/*
@@ -255,7 +267,6 @@ ret_from_intr:
255 jb resume_kernel # not returning to v8086 or userspace 267 jb resume_kernel # not returning to v8086 or userspace
256 268
257ENTRY(resume_userspace) 269ENTRY(resume_userspace)
258 LOCKDEP_SYS_EXIT
259 DISABLE_INTERRUPTS(CLBR_ANY) 270 DISABLE_INTERRUPTS(CLBR_ANY)
260 TRACE_IRQS_OFF 271 TRACE_IRQS_OFF
261 movl %esp, %eax 272 movl %esp, %eax
@@ -276,76 +287,47 @@ need_resched:
276END(resume_kernel) 287END(resume_kernel)
277#endif 288#endif
278 289
279/*
280 * SYSENTER_RETURN points to after the SYSENTER instruction
281 * in the vsyscall page. See vsyscall-sysentry.S, which defines
282 * the symbol.
283 */
284
285 # SYSENTER call handler stub 290 # SYSENTER call handler stub
286ENTRY(entry_SYSENTER_32) 291ENTRY(entry_SYSENTER_32)
287 movl TSS_sysenter_sp0(%esp), %esp 292 movl TSS_sysenter_sp0(%esp), %esp
288sysenter_past_esp: 293sysenter_past_esp:
294 pushl $__USER_DS /* pt_regs->ss */
295 pushl %ecx /* pt_regs->cx */
296 pushfl /* pt_regs->flags (except IF = 0) */
297 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
298 pushl $__USER_CS /* pt_regs->cs */
299 pushl $0 /* pt_regs->ip = 0 (placeholder) */
300 pushl %eax /* pt_regs->orig_ax */
301 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
302
289 /* 303 /*
290 * Interrupts are disabled here, but we can't trace it until 304 * User mode is traced as though IRQs are on, and SYSENTER
291 * enough kernel state to call TRACE_IRQS_OFF can be called - but 305 * turned them off.
292 * we immediately enable interrupts at that point anyway.
293 */
294 pushl $__USER_DS
295 pushl %ebp
296 pushfl
297 orl $X86_EFLAGS_IF, (%esp)
298 pushl $__USER_CS
299 /*
300 * Push current_thread_info()->sysenter_return to the stack.
301 * A tiny bit of offset fixup is necessary: TI_sysenter_return
302 * is relative to thread_info, which is at the bottom of the
303 * kernel stack page. 4*4 means the 4 words pushed above;
304 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
305 * and THREAD_SIZE takes us to the bottom.
306 */ 306 */
307 pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp) 307 TRACE_IRQS_OFF
308
309 pushl %eax
310 SAVE_ALL
311 ENABLE_INTERRUPTS(CLBR_NONE)
312
313/*
314 * Load the potential sixth argument from user stack.
315 * Careful about security.
316 */
317 cmpl $__PAGE_OFFSET-3, %ebp
318 jae syscall_fault
319 ASM_STAC
3201: movl (%ebp), %ebp
321 ASM_CLAC
322 movl %ebp, PT_EBP(%esp)
323 _ASM_EXTABLE(1b, syscall_fault)
324 308
325 GET_THREAD_INFO(%ebp) 309 movl %esp, %eax
310 call do_fast_syscall_32
311 testl %eax, %eax
312 jz .Lsyscall_32_done
326 313
327 testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp) 314/* Opportunistic SYSEXIT */
328 jnz syscall_trace_entry 315 TRACE_IRQS_ON /* User mode traces as IRQs on. */
329sysenter_do_call: 316 movl PT_EIP(%esp), %edx /* pt_regs->ip */
330 cmpl $(NR_syscalls), %eax 317 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
331 jae sysenter_badsys
332 call *sys_call_table(, %eax, 4)
333sysenter_after_call:
334 movl %eax, PT_EAX(%esp)
335 LOCKDEP_SYS_EXIT
336 DISABLE_INTERRUPTS(CLBR_ANY)
337 TRACE_IRQS_OFF
338 movl TI_flags(%ebp), %ecx
339 testl $_TIF_ALLWORK_MASK, %ecx
340 jnz syscall_exit_work_irqs_off
341sysenter_exit:
342/* if something modifies registers it must also disable sysexit */
343 movl PT_EIP(%esp), %edx
344 movl PT_OLDESP(%esp), %ecx
345 xorl %ebp, %ebp
346 TRACE_IRQS_ON
3471: mov PT_FS(%esp), %fs 3181: mov PT_FS(%esp), %fs
348 PTGS_TO_GS 319 PTGS_TO_GS
320 popl %ebx /* pt_regs->bx */
321 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
322 popl %esi /* pt_regs->si */
323 popl %edi /* pt_regs->di */
324 popl %ebp /* pt_regs->bp */
325 popl %eax /* pt_regs->ax */
326
327 /*
328 * Return back to the vDSO, which will pop ecx and edx.
329 * Don't bother with DS and ES (they already contain __USER_DS).
330 */
349 ENABLE_INTERRUPTS_SYSEXIT 331 ENABLE_INTERRUPTS_SYSEXIT
350 332
351.pushsection .fixup, "ax" 333.pushsection .fixup, "ax"
@@ -359,21 +341,18 @@ ENDPROC(entry_SYSENTER_32)
359 # system call handler stub 341 # system call handler stub
360ENTRY(entry_INT80_32) 342ENTRY(entry_INT80_32)
361 ASM_CLAC 343 ASM_CLAC
362 pushl %eax # save orig_eax 344 pushl %eax /* pt_regs->orig_ax */
363 SAVE_ALL 345 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
364 GET_THREAD_INFO(%ebp) 346
365 # system call tracing in operation / emulation 347 /*
366 testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp) 348 * User mode is traced as though IRQs are on. Unlike the 64-bit
367 jnz syscall_trace_entry 349 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
368 cmpl $(NR_syscalls), %eax 350 * are already on (unless user code is messing around with iopl).
369 jae syscall_badsys 351 */
370syscall_call: 352
371 call *sys_call_table(, %eax, 4) 353 movl %esp, %eax
372syscall_after_call: 354 call do_syscall_32_irqs_on
373 movl %eax, PT_EAX(%esp) # store the return value 355.Lsyscall_32_done:
374syscall_exit:
375 LOCKDEP_SYS_EXIT
376 jmp syscall_exit_work
377 356
378restore_all: 357restore_all:
379 TRACE_IRQS_IRET 358 TRACE_IRQS_IRET
@@ -450,47 +429,6 @@ ldt_ss:
450#endif 429#endif
451ENDPROC(entry_INT80_32) 430ENDPROC(entry_INT80_32)
452 431
453 # perform syscall exit tracing
454 ALIGN
455syscall_trace_entry:
456 movl $-ENOSYS, PT_EAX(%esp)
457 movl %esp, %eax
458 call syscall_trace_enter
459 /* What it returned is what we'll actually use. */
460 cmpl $(NR_syscalls), %eax
461 jnae syscall_call
462 jmp syscall_exit
463END(syscall_trace_entry)
464
465 # perform syscall exit tracing
466 ALIGN
467syscall_exit_work_irqs_off:
468 TRACE_IRQS_ON
469 ENABLE_INTERRUPTS(CLBR_ANY)
470
471syscall_exit_work:
472 movl %esp, %eax
473 call syscall_return_slowpath
474 jmp restore_all
475END(syscall_exit_work)
476
477syscall_fault:
478 ASM_CLAC
479 GET_THREAD_INFO(%ebp)
480 movl $-EFAULT, PT_EAX(%esp)
481 jmp resume_userspace
482END(syscall_fault)
483
484syscall_badsys:
485 movl $-ENOSYS, %eax
486 jmp syscall_after_call
487END(syscall_badsys)
488
489sysenter_badsys:
490 movl $-ENOSYS, %eax
491 jmp sysenter_after_call
492END(sysenter_badsys)
493
494.macro FIXUP_ESPFIX_STACK 432.macro FIXUP_ESPFIX_STACK
495/* 433/*
496 * Switch back for ESPFIX stack to the normal zerobased stack 434 * Switch back for ESPFIX stack to the normal zerobased stack
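The new SAVE_ALL pt_regs_ax=$-ENOSYS argument above pre-seeds the pt_regs->ax slot, so the C dispatcher only overwrites it for in-range syscall numbers and anything bogus naturally returns -ENOSYS with no dedicated badsys branch. A stand-alone sketch of that convention with a three-entry stand-in table (the ENOSYS value is assumed):

#include <stdio.h>

#define FAKE_ENOSYS      38
#define NR_FAKE_SYSCALLS 3

static long sys_zero(void) { return 0; }
static long sys_one(void)  { return 1; }
static long sys_two(void)  { return 2; }

static long (*const fake_table[NR_FAKE_SYSCALLS])(void) = { sys_zero, sys_one, sys_two };

struct fake_regs { unsigned int orig_ax; long ax; };

static void dispatch(struct fake_regs *regs)
{
    regs->ax = -FAKE_ENOSYS;                 /* what SAVE_ALL pt_regs_ax=$-ENOSYS pre-seeds */
    if (regs->orig_ax < NR_FAKE_SYSCALLS)    /* only in-range numbers overwrite it */
        regs->ax = fake_table[regs->orig_ax]();
}

int main(void)
{
    struct fake_regs ok = { .orig_ax = 1 }, bad = { .orig_ax = 999 };

    dispatch(&ok);  printf("%ld\n", ok.ax);  /* 1 */
    dispatch(&bad); printf("%ld\n", bad.ax); /* -38, i.e. -ENOSYS */
    return 0;
}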
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 055a01de7c8d..53616ca03244 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -391,20 +391,16 @@ GLOBAL(stub_execveat)
391 jmp return_from_execve 391 jmp return_from_execve
392END(stub_execveat) 392END(stub_execveat)
393 393
394#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) 394#if defined(CONFIG_X86_X32_ABI)
395 .align 8 395 .align 8
396GLOBAL(stub_x32_execve) 396GLOBAL(stub_x32_execve)
397GLOBAL(stub32_execve)
398 call compat_sys_execve 397 call compat_sys_execve
399 jmp return_from_execve 398 jmp return_from_execve
400END(stub32_execve)
401END(stub_x32_execve) 399END(stub_x32_execve)
402 .align 8 400 .align 8
403GLOBAL(stub_x32_execveat) 401GLOBAL(stub_x32_execveat)
404GLOBAL(stub32_execveat)
405 call compat_sys_execveat 402 call compat_sys_execveat
406 jmp return_from_execve 403 jmp return_from_execve
407END(stub32_execveat)
408END(stub_x32_execveat) 404END(stub_x32_execveat)
409#endif 405#endif
410 406
@@ -557,7 +553,6 @@ ret_from_intr:
557 jz retint_kernel 553 jz retint_kernel
558 554
559 /* Interrupt came from user space */ 555 /* Interrupt came from user space */
560 LOCKDEP_SYS_EXIT_IRQ
561GLOBAL(retint_user) 556GLOBAL(retint_user)
562 mov %rsp,%rdi 557 mov %rsp,%rdi
563 call prepare_exit_to_usermode 558 call prepare_exit_to_usermode
@@ -587,7 +582,7 @@ retint_kernel:
587 * At this label, code paths which return to kernel and to user, 582 * At this label, code paths which return to kernel and to user,
588 * which come from interrupts/exception and from syscalls, merge. 583 * which come from interrupts/exception and from syscalls, merge.
589 */ 584 */
590restore_regs_and_iret: 585GLOBAL(restore_regs_and_iret)
591 RESTORE_EXTRA_REGS 586 RESTORE_EXTRA_REGS
592restore_c_regs_and_iret: 587restore_c_regs_and_iret:
593 RESTORE_C_REGS 588 RESTORE_C_REGS
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index a9360d40fb7f..c3201830a85e 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -16,16 +16,6 @@
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/err.h> 17#include <linux/err.h>
18 18
19/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
20#include <linux/elf-em.h>
21#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
22#define __AUDIT_ARCH_LE 0x40000000
23
24#ifndef CONFIG_AUDITSYSCALL
25# define sysexit_audit ia32_ret_from_sys_call_irqs_off
26# define sysretl_audit ia32_ret_from_sys_call_irqs_off
27#endif
28
29 .section .entry.text, "ax" 19 .section .entry.text, "ax"
30 20
31#ifdef CONFIG_PARAVIRT 21#ifdef CONFIG_PARAVIRT
@@ -58,219 +48,87 @@ ENDPROC(native_usergs_sysret32)
58 * with the int 0x80 path. 48 * with the int 0x80 path.
59 */ 49 */
60ENTRY(entry_SYSENTER_compat) 50ENTRY(entry_SYSENTER_compat)
61 /* 51 /* Interrupts are off on entry. */
62 * Interrupts are off on entry.
63 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
64 * it is too small to ever cause noticeable irq latency.
65 */
66 SWAPGS_UNSAFE_STACK 52 SWAPGS_UNSAFE_STACK
67 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 53 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
68 ENABLE_INTERRUPTS(CLBR_NONE)
69 54
70 /* Zero-extending 32-bit regs, do not remove */ 55 /*
71 movl %ebp, %ebp 56 * User tracing code (ptrace or signal handlers) might assume that
57 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
58 * syscall. Just in case the high bits are nonzero, zero-extend
59 * the syscall number. (This could almost certainly be deleted
60 * with no ill effects.)
61 */
72 movl %eax, %eax 62 movl %eax, %eax
73 63
74 movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
75
76 /* Construct struct pt_regs on stack */ 64 /* Construct struct pt_regs on stack */
77 pushq $__USER32_DS /* pt_regs->ss */ 65 pushq $__USER32_DS /* pt_regs->ss */
78 pushq %rbp /* pt_regs->sp */ 66 pushq %rcx /* pt_regs->sp */
79 pushfq /* pt_regs->flags */ 67
68 /*
69 * Push flags. This is nasty. First, interrupts are currently
70 * off, but we need pt_regs->flags to have IF set. Second, even
71 * if TF was set when SYSENTER started, it's clear by now. We fix
72 * that later using TIF_SINGLESTEP.
73 */
74 pushfq /* pt_regs->flags (except IF = 0) */
75 orl $X86_EFLAGS_IF, (%rsp) /* Fix saved flags */
76 ASM_CLAC /* Clear AC after saving FLAGS */
77
80 pushq $__USER32_CS /* pt_regs->cs */ 78 pushq $__USER32_CS /* pt_regs->cs */
81 pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */ 79 xorq %r8,%r8
80 pushq %r8 /* pt_regs->ip = 0 (placeholder) */
82 pushq %rax /* pt_regs->orig_ax */ 81 pushq %rax /* pt_regs->orig_ax */
83 pushq %rdi /* pt_regs->di */ 82 pushq %rdi /* pt_regs->di */
84 pushq %rsi /* pt_regs->si */ 83 pushq %rsi /* pt_regs->si */
85 pushq %rdx /* pt_regs->dx */ 84 pushq %rdx /* pt_regs->dx */
86 pushq %rcx /* pt_regs->cx */ 85 pushq %rcx /* pt_regs->cx (will be overwritten) */
87 pushq $-ENOSYS /* pt_regs->ax */ 86 pushq $-ENOSYS /* pt_regs->ax */
87 pushq %r8 /* pt_regs->r8 = 0 */
88 pushq %r8 /* pt_regs->r9 = 0 */
89 pushq %r8 /* pt_regs->r10 = 0 */
90 pushq %r8 /* pt_regs->r11 = 0 */
91 pushq %rbx /* pt_regs->rbx */
92 pushq %rbp /* pt_regs->rbp */
93 pushq %r8 /* pt_regs->r12 = 0 */
94 pushq %r8 /* pt_regs->r13 = 0 */
95 pushq %r8 /* pt_regs->r14 = 0 */
96 pushq %r8 /* pt_regs->r15 = 0 */
88 cld 97 cld
89 sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
90
91 /*
92 * no need to do an access_ok check here because rbp has been
93 * 32-bit zero extended
94 */
95 ASM_STAC
961: movl (%rbp), %ebp
97 _ASM_EXTABLE(1b, ia32_badarg)
98 ASM_CLAC
99 98
100 /* 99 /*
101 * Sysenter doesn't filter flags, so we need to clear NT 100 * Sysenter doesn't filter flags, so we need to clear NT
102 * ourselves. To save a few cycles, we can check whether 101 * ourselves. To save a few cycles, we can check whether
103 * NT was set instead of doing an unconditional popfq. 102 * NT was set instead of doing an unconditional popfq.
103 * This needs to happen before enabling interrupts so that
104 * we don't get preempted with NT set.
105 *
106 * NB.: sysenter_fix_flags is a label with the code under it moved
107 * out-of-line as an optimization: NT is unlikely to be set in the
108 * majority of the cases and instead of polluting the I$ unnecessarily,
109 * we're keeping that code behind a branch which will predict as
110 * not-taken and therefore its instructions won't be fetched.
104 */ 111 */
105 testl $X86_EFLAGS_NT, EFLAGS(%rsp) 112 testl $X86_EFLAGS_NT, EFLAGS(%rsp)
106 jnz sysenter_fix_flags 113 jnz sysenter_fix_flags
107sysenter_flags_fixed: 114sysenter_flags_fixed:
108 115
109 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
110 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
111 jnz sysenter_tracesys
112
113sysenter_do_call:
114 /* 32-bit syscall -> 64-bit C ABI argument conversion */
115 movl %edi, %r8d /* arg5 */
116 movl %ebp, %r9d /* arg6 */
117 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
118 movl %ebx, %edi /* arg1 */
119 movl %edx, %edx /* arg3 (zero extension) */
120sysenter_dispatch:
121 cmpq $(IA32_NR_syscalls-1), %rax
122 ja 1f
123 call *ia32_sys_call_table(, %rax, 8)
124 movq %rax, RAX(%rsp)
1251:
126 DISABLE_INTERRUPTS(CLBR_NONE)
127 TRACE_IRQS_OFF
128 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
129 jnz sysexit_audit
130sysexit_from_sys_call:
131 /* 116 /*
132 * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an 117 * User mode is traced as though IRQs are on, and SYSENTER
133 * NMI between STI and SYSEXIT has poorly specified behavior, 118 * turned them off.
134 * and and NMI followed by an IRQ with usergs is fatal. So
135 * we just pretend we're using SYSEXIT but we really use
136 * SYSRETL instead.
137 *
138 * This code path is still called 'sysexit' because it pairs
139 * with 'sysenter' and it uses the SYSENTER calling convention.
140 */ 119 */
141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
142 movl RIP(%rsp), %ecx /* User %eip */
143 movq RAX(%rsp), %rax
144 movl RSI(%rsp), %esi
145 movl RDI(%rsp), %edi
146 xorl %edx, %edx /* Do not leak kernel information */
147 xorq %r8, %r8
148 xorq %r9, %r9
149 xorq %r10, %r10
150 movl EFLAGS(%rsp), %r11d /* User eflags */
151 TRACE_IRQS_ON
152
153 /*
154 * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
155 * since it avoids a dicey window with interrupts enabled.
156 */
157 movl RSP(%rsp), %esp
158
159 /*
160 * USERGS_SYSRET32 does:
161 * gsbase = user's gs base
162 * eip = ecx
163 * rflags = r11
164 * cs = __USER32_CS
165 * ss = __USER_DS
166 *
167 * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
168 *
169 * pop %ebp
170 * pop %edx
171 * pop %ecx
172 *
173 * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
174 * avoid info leaks. R11 ends up with VDSO32_SYSENTER_RETURN's
175 * address (already known to user code), and R12-R15 are
176 * callee-saved and therefore don't contain any interesting
177 * kernel data.
178 */
179 USERGS_SYSRET32
180
181#ifdef CONFIG_AUDITSYSCALL
182 .macro auditsys_entry_common
183 /*
184 * At this point, registers hold syscall args in the 32-bit syscall ABI:
185 * EAX is syscall number, the 6 args are in EBX,ECX,EDX,ESI,EDI,EBP.
186 *
187 * We want to pass them to __audit_syscall_entry(), which is a 64-bit
188 * C function with 5 parameters, so shuffle them to match what
189 * the function expects: RDI,RSI,RDX,RCX,R8.
190 */
191 movl %esi, %r8d /* arg5 (R8 ) <= 4th syscall arg (ESI) */
192 xchg %ecx, %edx /* arg4 (RCX) <= 3rd syscall arg (EDX) */
193 /* arg3 (RDX) <= 2nd syscall arg (ECX) */
194 movl %ebx, %esi /* arg2 (RSI) <= 1st syscall arg (EBX) */
195 movl %eax, %edi /* arg1 (RDI) <= syscall number (EAX) */
196 call __audit_syscall_entry
197
198 /*
199 * We are going to jump back to the syscall dispatch code.
200 * Prepare syscall args as required by the 64-bit C ABI.
201 * Registers clobbered by __audit_syscall_entry() are
202 * loaded from pt_regs on stack:
203 */
204 movl ORIG_RAX(%rsp), %eax /* syscall number */
205 movl %ebx, %edi /* arg1 */
206 movl RCX(%rsp), %esi /* arg2 */
207 movl RDX(%rsp), %edx /* arg3 */
208 movl RSI(%rsp), %ecx /* arg4 */
209 movl RDI(%rsp), %r8d /* arg5 */
210 .endm
211
212 .macro auditsys_exit exit
213 TRACE_IRQS_ON
214 ENABLE_INTERRUPTS(CLBR_NONE)
215 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
216 jnz ia32_ret_from_sys_call
217 movl %eax, %esi /* second arg, syscall return value */
218 cmpl $-MAX_ERRNO, %eax /* is it an error ? */
219 jbe 1f
220 movslq %eax, %rsi /* if error sign extend to 64 bits */
2211: setbe %al /* 1 if error, 0 if not */
222 movzbl %al, %edi /* zero-extend that into %edi */
223 call __audit_syscall_exit
224 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
225 DISABLE_INTERRUPTS(CLBR_NONE)
226 TRACE_IRQS_OFF 120 TRACE_IRQS_OFF
227 testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
228 jz \exit
229 xorl %eax, %eax /* Do not leak kernel information */
230 movq %rax, R11(%rsp)
231 movq %rax, R10(%rsp)
232 movq %rax, R9(%rsp)
233 movq %rax, R8(%rsp)
234 jmp int_ret_from_sys_call_irqs_off
235 .endm
236 121
237sysenter_auditsys: 122 movq %rsp, %rdi
238 auditsys_entry_common 123 call do_fast_syscall_32
239 movl %ebp, %r9d /* reload 6th syscall arg */ 124 testl %eax, %eax
240 jmp sysenter_dispatch 125 jz .Lsyscall_32_done
241 126 jmp sysret32_from_system_call
242sysexit_audit:
243 auditsys_exit sysexit_from_sys_call
244#endif
245 127
246sysenter_fix_flags: 128sysenter_fix_flags:
247 pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) 129 pushq $X86_EFLAGS_FIXED
248 popfq 130 popfq
249 jmp sysenter_flags_fixed 131 jmp sysenter_flags_fixed
250
251sysenter_tracesys:
252#ifdef CONFIG_AUDITSYSCALL
253 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
254 jz sysenter_auditsys
255#endif
256 SAVE_EXTRA_REGS
257 xorl %eax, %eax /* Do not leak kernel information */
258 movq %rax, R11(%rsp)
259 movq %rax, R10(%rsp)
260 movq %rax, R9(%rsp)
261 movq %rax, R8(%rsp)
262 movq %rsp, %rdi /* &pt_regs -> arg1 */
263 call syscall_trace_enter
264
265 /* Reload arg registers from stack. (see sysenter_tracesys) */
266 movl RCX(%rsp), %ecx
267 movl RDX(%rsp), %edx
268 movl RSI(%rsp), %esi
269 movl RDI(%rsp), %edi
270 movl %eax, %eax /* zero extension */
271
272 RESTORE_EXTRA_REGS
273 jmp sysenter_do_call
274ENDPROC(entry_SYSENTER_compat) 132ENDPROC(entry_SYSENTER_compat)
275 133
276/* 134/*
@@ -298,21 +156,14 @@ ENDPROC(entry_SYSENTER_compat)
298 * edi arg5 156 * edi arg5
299 * esp user stack 157 * esp user stack
300 * 0(%esp) arg6 158 * 0(%esp) arg6
301 *
302 * This is purely a fast path. For anything complicated we use the int 0x80
303 * path below. We set up a complete hardware stack frame to share code
304 * with the int 0x80 path.
305 */ 159 */
306ENTRY(entry_SYSCALL_compat) 160ENTRY(entry_SYSCALL_compat)
307 /* 161 /* Interrupts are off on entry. */
308 * Interrupts are off on entry.
309 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
310 * it is too small to ever cause noticeable irq latency.
311 */
312 SWAPGS_UNSAFE_STACK 162 SWAPGS_UNSAFE_STACK
163
164 /* Stash user ESP and switch to the kernel stack. */
313 movl %esp, %r8d 165 movl %esp, %r8d
314 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 166 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
315 ENABLE_INTERRUPTS(CLBR_NONE)
316 167
317 /* Zero-extending 32-bit regs, do not remove */ 168 /* Zero-extending 32-bit regs, do not remove */
318 movl %eax, %eax 169 movl %eax, %eax
@@ -327,162 +178,67 @@ ENTRY(entry_SYSCALL_compat)
327 pushq %rdi /* pt_regs->di */ 178 pushq %rdi /* pt_regs->di */
328 pushq %rsi /* pt_regs->si */ 179 pushq %rsi /* pt_regs->si */
329 pushq %rdx /* pt_regs->dx */ 180 pushq %rdx /* pt_regs->dx */
330 pushq %rbp /* pt_regs->cx */ 181 pushq %rcx /* pt_regs->cx (will be overwritten) */
331 movl %ebp, %ecx
332 pushq $-ENOSYS /* pt_regs->ax */ 182 pushq $-ENOSYS /* pt_regs->ax */
333 sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */ 183 xorq %r8,%r8
184 pushq %r8 /* pt_regs->r8 = 0 */
185 pushq %r8 /* pt_regs->r9 = 0 */
186 pushq %r8 /* pt_regs->r10 = 0 */
187 pushq %r8 /* pt_regs->r11 = 0 */
188 pushq %rbx /* pt_regs->rbx */
189 pushq %rbp /* pt_regs->rbp */
190 pushq %r8 /* pt_regs->r12 = 0 */
191 pushq %r8 /* pt_regs->r13 = 0 */
192 pushq %r8 /* pt_regs->r14 = 0 */
193 pushq %r8 /* pt_regs->r15 = 0 */
334 194
335 /* 195 /*
336 * No need to do an access_ok check here because r8 has been 196 * User mode is traced as though IRQs are on, and SYSENTER
337 * 32-bit zero extended: 197 * turned them off.
338 */ 198 */
339 ASM_STAC
3401: movl (%r8), %r9d
341 _ASM_EXTABLE(1b, ia32_badarg)
342 ASM_CLAC
343 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
344 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
345 jnz cstar_tracesys
346
347cstar_do_call:
348 /* 32-bit syscall -> 64-bit C ABI argument conversion */
349 movl %edi, %r8d /* arg5 */
350 /* r9 already loaded */ /* arg6 */
351 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
352 movl %ebx, %edi /* arg1 */
353 movl %edx, %edx /* arg3 (zero extension) */
354
355cstar_dispatch:
356 cmpq $(IA32_NR_syscalls-1), %rax
357 ja 1f
358
359 call *ia32_sys_call_table(, %rax, 8)
360 movq %rax, RAX(%rsp)
3611:
362 DISABLE_INTERRUPTS(CLBR_NONE)
363 TRACE_IRQS_OFF 199 TRACE_IRQS_OFF
364 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
365 jnz sysretl_audit
366 200
367sysretl_from_sys_call: 201 movq %rsp, %rdi
368 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 202 call do_fast_syscall_32
369 movl RDX(%rsp), %edx 203 testl %eax, %eax
370 movl RSI(%rsp), %esi 204 jz .Lsyscall_32_done
371 movl RDI(%rsp), %edi 205
372 movl RIP(%rsp), %ecx 206 /* Opportunistic SYSRET */
373 movl EFLAGS(%rsp), %r11d 207sysret32_from_system_call:
374 movq RAX(%rsp), %rax 208 TRACE_IRQS_ON /* User mode traces as IRQs on. */
375 xorq %r10, %r10 209 movq RBX(%rsp), %rbx /* pt_regs->rbx */
376 xorq %r9, %r9 210 movq RBP(%rsp), %rbp /* pt_regs->rbp */
377 xorq %r8, %r8 211 movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
378 TRACE_IRQS_ON 212 movq RIP(%rsp), %rcx /* pt_regs->ip (in rcx) */
379 movl RSP(%rsp), %esp 213 addq $RAX, %rsp /* Skip r8-r15 */
380 /* 214 popq %rax /* pt_regs->rax */
381 * 64-bit->32-bit SYSRET restores eip from ecx, 215 popq %rdx /* Skip pt_regs->cx */
382 * eflags from r11 (but RF and VM bits are forced to 0), 216 popq %rdx /* pt_regs->dx */
383 * cs and ss are loaded from MSRs. 217 popq %rsi /* pt_regs->si */
384 * (Note: 32-bit->32-bit SYSRET is different: since r11 218 popq %rdi /* pt_regs->di */
385 * does not exist, it merely sets eflags.IF=1). 219
220 /*
221 * USERGS_SYSRET32 does:
222 * GSBASE = user's GS base
223 * EIP = ECX
224 * RFLAGS = R11
225 * CS = __USER32_CS
226 * SS = __USER_DS
227 *
228 * ECX will not match pt_regs->cx, but we're returning to a vDSO
229 * trampoline that will fix up RCX, so this is okay.
386 * 230 *
387 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss 231 * R12-R15 are callee-saved, so they contain whatever was in them
388 * descriptor is not reinitialized. This means that we must 232 * when the system call started, which is already known to user
389 * avoid SYSRET with SS == NULL, which could happen if we schedule, 233 * code. We zero R8-R10 to avoid info leaks.
390 * exit the kernel, and re-enter using an interrupt vector. (All 234 */
391 * interrupt entries on x86_64 set SS to NULL.) We prevent that 235 xorq %r8, %r8
392 * from happening by reloading SS in __switch_to. 236 xorq %r9, %r9
393 */ 237 xorq %r10, %r10
394 USERGS_SYSRET32 238 movq RSP-ORIG_RAX(%rsp), %rsp
395 239 USERGS_SYSRET32
396#ifdef CONFIG_AUDITSYSCALL
397cstar_auditsys:
398 movl %r9d, R9(%rsp) /* register to be clobbered by call */
399 auditsys_entry_common
400 movl R9(%rsp), %r9d /* reload 6th syscall arg */
401 jmp cstar_dispatch
402
403sysretl_audit:
404 auditsys_exit sysretl_from_sys_call
405#endif
406
407cstar_tracesys:
408#ifdef CONFIG_AUDITSYSCALL
409 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
410 jz cstar_auditsys
411#endif
412 xchgl %r9d, %ebp
413 SAVE_EXTRA_REGS
414 xorl %eax, %eax /* Do not leak kernel information */
415 movq %rax, R11(%rsp)
416 movq %rax, R10(%rsp)
417 movq %r9, R9(%rsp)
418 movq %rax, R8(%rsp)
419 movq %rsp, %rdi /* &pt_regs -> arg1 */
420 call syscall_trace_enter
421 movl R9(%rsp), %r9d
422
423 /* Reload arg registers from stack. (see sysenter_tracesys) */
424 movl RCX(%rsp), %ecx
425 movl RDX(%rsp), %edx
426 movl RSI(%rsp), %esi
427 movl RDI(%rsp), %edi
428 movl %eax, %eax /* zero extension */
429
430 RESTORE_EXTRA_REGS
431 xchgl %ebp, %r9d
432 jmp cstar_do_call
433END(entry_SYSCALL_compat) 240END(entry_SYSCALL_compat)
434 241
435ia32_badarg:
436 /*
437 * So far, we've entered kernel mode, set AC, turned on IRQs, and
438 * saved C regs except r8-r11. We haven't done any of the other
439 * standard entry work, though. We want to bail, but we shouldn't
440 * treat this as a syscall entry since we don't even know what the
441 * args are. Instead, treat this as a non-syscall entry, finish
442 * the entry work, and immediately exit after setting AX = -EFAULT.
443 *
444 * We're really just being polite here. Killing the task outright
445 * would be a reasonable action, too. Given that the only valid
446 * way to have gotten here is through the vDSO, and we already know
447 * that the stack pointer is bad, the task isn't going to survive
448 * for long no matter what we do.
449 */
450
451 ASM_CLAC /* undo STAC */
452 movq $-EFAULT, RAX(%rsp) /* return -EFAULT if possible */
453
454 /* Fill in the rest of pt_regs */
455 xorl %eax, %eax
456 movq %rax, R11(%rsp)
457 movq %rax, R10(%rsp)
458 movq %rax, R9(%rsp)
459 movq %rax, R8(%rsp)
460 SAVE_EXTRA_REGS
461
462 /* Turn IRQs back off. */
463 DISABLE_INTERRUPTS(CLBR_NONE)
464 TRACE_IRQS_OFF
465
466 /* Now finish entering normal kernel mode. */
467#ifdef CONFIG_CONTEXT_TRACKING
468 call enter_from_user_mode
469#endif
470
471 /* And exit again. */
472 jmp retint_user
473
474ia32_ret_from_sys_call_irqs_off:
475 TRACE_IRQS_ON
476 ENABLE_INTERRUPTS(CLBR_NONE)
477
478ia32_ret_from_sys_call:
479 xorl %eax, %eax /* Do not leak kernel information */
480 movq %rax, R11(%rsp)
481 movq %rax, R10(%rsp)
482 movq %rax, R9(%rsp)
483 movq %rax, R8(%rsp)
484 jmp int_ret_from_sys_call
485
486/* 242/*
487 * Emulated IA32 system calls via int 0x80. 243 * Emulated IA32 system calls via int 0x80.
488 * 244 *
@@ -507,14 +263,17 @@ ia32_ret_from_sys_call:
507ENTRY(entry_INT80_compat) 263ENTRY(entry_INT80_compat)
508 /* 264 /*
509 * Interrupts are off on entry. 265 * Interrupts are off on entry.
510 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
511 * it is too small to ever cause noticeable irq latency.
512 */ 266 */
513 PARAVIRT_ADJUST_EXCEPTION_FRAME 267 PARAVIRT_ADJUST_EXCEPTION_FRAME
514 SWAPGS 268 SWAPGS
515 ENABLE_INTERRUPTS(CLBR_NONE)
516 269
517 /* Zero-extending 32-bit regs, do not remove */ 270 /*
271 * User tracing code (ptrace or signal handlers) might assume that
272 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
273 * syscall. Just in case the high bits are nonzero, zero-extend
274 * the syscall number. (This could almost certainly be deleted
275 * with no ill effects.)
276 */
518 movl %eax, %eax 277 movl %eax, %eax
519 278
520 /* Construct struct pt_regs on stack (iret frame is already on stack) */ 279 /* Construct struct pt_regs on stack (iret frame is already on stack) */
@@ -524,67 +283,37 @@ ENTRY(entry_INT80_compat)
524 pushq %rdx /* pt_regs->dx */ 283 pushq %rdx /* pt_regs->dx */
525 pushq %rcx /* pt_regs->cx */ 284 pushq %rcx /* pt_regs->cx */
526 pushq $-ENOSYS /* pt_regs->ax */ 285 pushq $-ENOSYS /* pt_regs->ax */
527 pushq $0 /* pt_regs->r8 */ 286 xorq %r8,%r8
528 pushq $0 /* pt_regs->r9 */ 287 pushq %r8 /* pt_regs->r8 = 0 */
529 pushq $0 /* pt_regs->r10 */ 288 pushq %r8 /* pt_regs->r9 = 0 */
530 pushq $0 /* pt_regs->r11 */ 289 pushq %r8 /* pt_regs->r10 = 0 */
290 pushq %r8 /* pt_regs->r11 = 0 */
291 pushq %rbx /* pt_regs->rbx */
292 pushq %rbp /* pt_regs->rbp */
293 pushq %r12 /* pt_regs->r12 */
294 pushq %r13 /* pt_regs->r13 */
295 pushq %r14 /* pt_regs->r14 */
296 pushq %r15 /* pt_regs->r15 */
531 cld 297 cld
532 sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
533
534 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
535 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
536 jnz ia32_tracesys
537
538ia32_do_call:
539 /* 32-bit syscall -> 64-bit C ABI argument conversion */
540 movl %edi, %r8d /* arg5 */
541 movl %ebp, %r9d /* arg6 */
542 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
543 movl %ebx, %edi /* arg1 */
544 movl %edx, %edx /* arg3 (zero extension) */
545 cmpq $(IA32_NR_syscalls-1), %rax
546 ja 1f
547 298
548 call *ia32_sys_call_table(, %rax, 8)
549 movq %rax, RAX(%rsp)
5501:
551 jmp int_ret_from_sys_call
552
553ia32_tracesys:
554 SAVE_EXTRA_REGS
555 movq %rsp, %rdi /* &pt_regs -> arg1 */
556 call syscall_trace_enter
557 /* 299 /*
558 * Reload arg registers from stack in case ptrace changed them. 300 * User mode is traced as though IRQs are on, and the interrupt
559 * Don't reload %eax because syscall_trace_enter() returned 301 * gate turned them off.
560 * the %rax value we should see. But do truncate it to 32 bits.
561 * If it's -1 to make us punt the syscall, then (u32)-1 is still
562 * an appropriately invalid value.
563 */ 302 */
564 movl RCX(%rsp), %ecx 303 TRACE_IRQS_OFF
565 movl RDX(%rsp), %edx
566 movl RSI(%rsp), %esi
567 movl RDI(%rsp), %edi
568 movl %eax, %eax /* zero extension */
569 RESTORE_EXTRA_REGS
570 jmp ia32_do_call
571END(entry_INT80_compat)
572 304
573 .macro PTREGSCALL label, func 305 movq %rsp, %rdi
574 ALIGN 306 call do_syscall_32_irqs_off
575GLOBAL(\label) 307.Lsyscall_32_done:
576 leaq \func(%rip), %rax
577 jmp ia32_ptregs_common
578 .endm
579 308
580 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn 309 /* Go back to user mode. */
581 PTREGSCALL stub32_sigreturn, sys32_sigreturn 310 TRACE_IRQS_ON
582 PTREGSCALL stub32_fork, sys_fork 311 SWAPGS
583 PTREGSCALL stub32_vfork, sys_vfork 312 jmp restore_regs_and_iret
313END(entry_INT80_compat)
584 314
585 ALIGN 315 ALIGN
586GLOBAL(stub32_clone) 316GLOBAL(stub32_clone)
587 leaq sys_clone(%rip), %rax
588 /* 317 /*
589 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr). 318 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
590 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val). 319 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
@@ -593,12 +322,4 @@ GLOBAL(stub32_clone)
593 * so we need to swap arguments here before calling it: 322 * so we need to swap arguments here before calling it:
594 */ 323 */
595 xchg %r8, %rcx 324 xchg %r8, %rcx
596 jmp ia32_ptregs_common 325 jmp sys_clone
597
598 ALIGN
599ia32_ptregs_common:
600 SAVE_EXTRA_REGS 8
601 call *%rax
602 RESTORE_EXTRA_REGS 8
603 ret
604END(ia32_ptregs_common)
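The removed "32-bit syscall -> 64-bit C ABI argument conversion" blocks above are replaced by the common C dispatcher, which calls every handler through a six-argument prototype and zero-extends each 32-bit register value with a cast. A stand-alone sketch of why the casts matter, using an invented handler and made-up register contents (assumes a 64-bit host for the wide constants):

#include <stdio.h>

struct fake_regs { unsigned long long bx, cx, dx, si, di, bp; };

/* Fake six-argument handler; names and meaning of the args are invented. */
static long fake_handler(unsigned long fd, unsigned long buf, unsigned long len,
                         unsigned long lo, unsigned long hi, unsigned long unused)
{
    (void)unused;
    return printf("fd=%lu buf=%#lx len=%lu pos=%llu\n", fd, buf, len,
                  (unsigned long long)lo | ((unsigned long long)hi << 32));
}

int main(void)
{
    /* Pretend user space left garbage in the upper halves of the registers: */
    struct fake_regs regs = {
        .bx = 0xdeadbeef00000003ULL, .cx = 0xffffffff08048000ULL,
        .dx = 0x100, .si = 0x2, .di = 0, .bp = 0,
    };

    /* The (unsigned int) casts do the zero-extension the old asm shuffle did. */
    return (int)fake_handler((unsigned int)regs.bx, (unsigned int)regs.cx,
                             (unsigned int)regs.dx, (unsigned int)regs.si,
                             (unsigned int)regs.di, (unsigned int)regs.bp);
}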
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 8ea34f94e973..9a6649857106 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -4,24 +4,21 @@
4#include <linux/sys.h> 4#include <linux/sys.h>
5#include <linux/cache.h> 5#include <linux/cache.h>
6#include <asm/asm-offsets.h> 6#include <asm/asm-offsets.h>
7#include <asm/syscall.h>
7 8
8#ifdef CONFIG_IA32_EMULATION 9#ifdef CONFIG_IA32_EMULATION
9#define SYM(sym, compat) compat 10#define SYM(sym, compat) compat
10#else 11#else
11#define SYM(sym, compat) sym 12#define SYM(sym, compat) sym
12#define ia32_sys_call_table sys_call_table
13#define __NR_syscall_compat_max __NR_syscall_max
14#endif 13#endif
15 14
16#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ; 15#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long SYM(sym, compat)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
17#include <asm/syscalls_32.h> 16#include <asm/syscalls_32.h>
18#undef __SYSCALL_I386 17#undef __SYSCALL_I386
19 18
20#define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat), 19#define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
21 20
22typedef asmlinkage void (*sys_call_ptr_t)(void); 21extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
23
24extern asmlinkage void sys_ni_syscall(void);
25 22
26__visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = { 23__visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = {
27 /* 24 /*
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 4ac730b37f0b..41283d22be7a 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -14,13 +14,13 @@
14# define __SYSCALL_X32(nr, sym, compat) /* nothing */ 14# define __SYSCALL_X32(nr, sym, compat) /* nothing */
15#endif 15#endif
16 16
17#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; 17#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
18#include <asm/syscalls_64.h> 18#include <asm/syscalls_64.h>
19#undef __SYSCALL_64 19#undef __SYSCALL_64
20 20
21#define __SYSCALL_64(nr, sym, compat) [nr] = sym, 21#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
22 22
23extern void sys_ni_syscall(void); 23extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
24 24
25asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { 25asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
26 /* 26 /*
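syscall_32.c and syscall_64.c above now declare every table entry with one uniform six-argument prototype instead of void (*)(void). A stand-alone sketch of that table shape, using stand-in handlers and the same GCC range-initializer idiom the kernel tables use to default unset slots to the not-implemented handler (the -ENOSYS value is assumed):

#include <stdio.h>

typedef long (*sys_call_ptr_t)(unsigned long, unsigned long, unsigned long,
                               unsigned long, unsigned long, unsigned long);

static long fake_ni_syscall(unsigned long a, unsigned long b, unsigned long c,
                            unsigned long d, unsigned long e, unsigned long f)
{
    (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
    return -38;                      /* stand-in for -ENOSYS */
}

static long fake_getpid(unsigned long a, unsigned long b, unsigned long c,
                        unsigned long d, unsigned long e, unsigned long f)
{
    (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
    return 1234;
}

#define NR_FAKE_MAX 7

/* GCC range initializer, mirroring "[0 ... __NR_syscall_max] = &sys_ni_syscall" */
static const sys_call_ptr_t fake_table[NR_FAKE_MAX + 1] = {
    [0 ... NR_FAKE_MAX] = fake_ni_syscall,
    [3] = fake_getpid,
};

int main(void)
{
    printf("%ld %ld\n", fake_table[3](0, 0, 0, 0, 0, 0),
           fake_table[5](0, 0, 0, 0, 0, 0));   /* 1234 -38 */
    return 0;
}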
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 7663c455b9f6..caa2c712d1e7 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -8,7 +8,7 @@
8# 8#
90 i386 restart_syscall sys_restart_syscall 90 i386 restart_syscall sys_restart_syscall
101 i386 exit sys_exit 101 i386 exit sys_exit
112 i386 fork sys_fork stub32_fork 112 i386 fork sys_fork sys_fork
123 i386 read sys_read 123 i386 read sys_read
134 i386 write sys_write 134 i386 write sys_write
145 i386 open sys_open compat_sys_open 145 i386 open sys_open compat_sys_open
@@ -17,7 +17,7 @@
178 i386 creat sys_creat 178 i386 creat sys_creat
189 i386 link sys_link 189 i386 link sys_link
1910 i386 unlink sys_unlink 1910 i386 unlink sys_unlink
2011 i386 execve sys_execve stub32_execve 2011 i386 execve sys_execve compat_sys_execve
2112 i386 chdir sys_chdir 2112 i386 chdir sys_chdir
2213 i386 time sys_time compat_sys_time 2213 i386 time sys_time compat_sys_time
2314 i386 mknod sys_mknod 2314 i386 mknod sys_mknod
@@ -125,7 +125,7 @@
125116 i386 sysinfo sys_sysinfo compat_sys_sysinfo 125116 i386 sysinfo sys_sysinfo compat_sys_sysinfo
126117 i386 ipc sys_ipc compat_sys_ipc 126117 i386 ipc sys_ipc compat_sys_ipc
127118 i386 fsync sys_fsync 127118 i386 fsync sys_fsync
128119 i386 sigreturn sys_sigreturn stub32_sigreturn 128119 i386 sigreturn sys_sigreturn sys32_sigreturn
129120 i386 clone sys_clone stub32_clone 129120 i386 clone sys_clone stub32_clone
130121 i386 setdomainname sys_setdomainname 130121 i386 setdomainname sys_setdomainname
131122 i386 uname sys_newuname 131122 i386 uname sys_newuname
@@ -179,7 +179,7 @@
179170 i386 setresgid sys_setresgid16 179170 i386 setresgid sys_setresgid16
180171 i386 getresgid sys_getresgid16 180171 i386 getresgid sys_getresgid16
181172 i386 prctl sys_prctl 181172 i386 prctl sys_prctl
182173 i386 rt_sigreturn sys_rt_sigreturn stub32_rt_sigreturn 182173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
183174 i386 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 183174 i386 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
184175 i386 rt_sigprocmask sys_rt_sigprocmask 184175 i386 rt_sigprocmask sys_rt_sigprocmask
185176 i386 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending 185176 i386 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
@@ -196,7 +196,7 @@
196187 i386 sendfile sys_sendfile compat_sys_sendfile 196187 i386 sendfile sys_sendfile compat_sys_sendfile
197188 i386 getpmsg 197188 i386 getpmsg
198189 i386 putpmsg 198189 i386 putpmsg
199190 i386 vfork sys_vfork stub32_vfork 199190 i386 vfork sys_vfork sys_vfork
200191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit 200191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit
201192 i386 mmap2 sys_mmap_pgoff 201192 i386 mmap2 sys_mmap_pgoff
202193 i386 truncate64 sys_truncate64 sys32_truncate64 202193 i386 truncate64 sys_truncate64 sys32_truncate64
@@ -364,7 +364,7 @@
364355 i386 getrandom sys_getrandom 364355 i386 getrandom sys_getrandom
365356 i386 memfd_create sys_memfd_create 365356 i386 memfd_create sys_memfd_create
366357 i386 bpf sys_bpf 366357 i386 bpf sys_bpf
367358 i386 execveat sys_execveat stub32_execveat 367358 i386 execveat sys_execveat compat_sys_execveat
368359 i386 socket sys_socket 368359 i386 socket sys_socket
369360 i386 socketpair sys_socketpair 369360 i386 socketpair sys_socketpair
370361 i386 bind sys_bind 370361 i386 bind sys_bind
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index a3d0767a6b29..265c0ed68118 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -19,9 +19,7 @@ obj-y += vma.o
 # vDSO images to build
 vdso_img-$(VDSO64-y)		+= 64
 vdso_img-$(VDSOX32-y)		+= x32
-vdso_img-$(VDSO32-y)		+= 32-int80
-vdso_img-$(CONFIG_IA32_EMULATION)	+= 32-syscall
-vdso_img-$(VDSO32-y)		+= 32-sysenter
+vdso_img-$(VDSO32-y)		+= 32
 
 obj-$(VDSO32-y)			+= vdso32-setup.o
 
@@ -69,7 +67,7 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
@@ -122,15 +120,6 @@ $(obj)/%.so: $(obj)/%.so.dbg
 $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
 	$(call if_changed,vdso)
 
-#
-# Build multiple 32-bit vDSO images to choose from at boot time.
-#
-vdso32.so-$(VDSO32-y)		+= int80
-vdso32.so-$(CONFIG_IA32_EMULATION)	+= syscall
-vdso32.so-$(VDSO32-y)		+= sysenter
-
-vdso32-images			= $(vdso32.so-y:%=vdso32-%.so)
-
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 
@@ -139,14 +128,12 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
-targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
+targets += vdso32/note.o vdso32/vclock_gettime.o vdso32/system_call.o
 targets += vdso32/vclock_gettime.o
 
-$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
-
-KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-$(vdso32-images:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-$(vdso32-images:%=$(obj)/%.dbg): asflags-$(CONFIG_X86_64) += -m32
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
+$(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(obj)/vdso32.so.dbg: asflags-$(CONFIG_X86_64) += -m32
 
 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
@@ -157,13 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
-$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
+$(obj)/vdso32.so.dbg: FORCE \
 	$(obj)/vdso32/vdso32.lds \
 	$(obj)/vdso32/vclock_gettime.o \
 	$(obj)/vdso32/note.o \
-	$(obj)/vdso32/%.o
+	$(obj)/vdso32/system_call.o
 	$(call if_changed,vdso)
 
 #
@@ -206,4 +193,4 @@ $(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
 PHONY += vdso_install $(vdso_img_insttargets)
 vdso_install: $(vdso_img_insttargets) FORCE
 
-clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64* vdso-image-*.c vdsox32.so*
+clean-files := vdso32.so vdso32.so.dbg vdso64* vdso-image-*.c vdsox32.so*
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 8627db24a7f6..785d9922b106 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -98,10 +98,10 @@ struct vdso_sym required_syms[] = {
 		"VDSO_FAKE_SECTION_TABLE_END", false
 	},
 	{"VDSO32_NOTE_MASK", true},
-	{"VDSO32_SYSENTER_RETURN", true},
 	{"__kernel_vsyscall", true},
 	{"__kernel_sigreturn", true},
 	{"__kernel_rt_sigreturn", true},
+	{"int80_landing_pad", true},
 };
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index e904c270573b..08a317a9ae4b 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -48,35 +48,9 @@ __setup("vdso32=", vdso32_setup);
 __setup_param("vdso=", vdso_setup, vdso32_setup, 0);
 #endif
 
-#ifdef CONFIG_X86_64
-
-#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SYSENTER32))
-#define vdso32_syscall()	(boot_cpu_has(X86_FEATURE_SYSCALL32))
-
-#else /* CONFIG_X86_32 */
-
-#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SEP))
-#define vdso32_syscall()	(0)
-
-#endif /* CONFIG_X86_64 */
-
-#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
-const struct vdso_image *selected_vdso32;
-#endif
-
 int __init sysenter_setup(void)
 {
-#ifdef CONFIG_COMPAT
-	if (vdso32_syscall())
-		selected_vdso32 = &vdso_image_32_syscall;
-	else
-#endif
-	if (vdso32_sysenter())
-		selected_vdso32 = &vdso_image_32_sysenter;
-	else
-		selected_vdso32 = &vdso_image_32_int80;
-
-	init_vdso_image(selected_vdso32);
+	init_vdso_image(&vdso_image_32);
 
 	return 0;
 }
diff --git a/arch/x86/entry/vdso/vdso32/int80.S b/arch/x86/entry/vdso/vdso32/int80.S
deleted file mode 100644
index b15b7c01aedb..000000000000
--- a/arch/x86/entry/vdso/vdso32/int80.S
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Code for the vDSO. This version uses the old int $0x80 method.
3 *
4 * First get the common code for the sigreturn entry points.
5 * This must come first.
6 */
7#include "sigreturn.S"
8
9 .text
10 .globl __kernel_vsyscall
11 .type __kernel_vsyscall,@function
12 ALIGN
13__kernel_vsyscall:
14.LSTART_vsyscall:
15 int $0x80
16 ret
17.LEND_vsyscall:
18 .size __kernel_vsyscall,.-.LSTART_vsyscall
19 .previous
20
21 .section .eh_frame,"a",@progbits
22.LSTARTFRAMEDLSI:
23 .long .LENDCIEDLSI-.LSTARTCIEDLSI
24.LSTARTCIEDLSI:
25 .long 0 /* CIE ID */
26 .byte 1 /* Version number */
27 .string "zR" /* NUL-terminated augmentation string */
28 .uleb128 1 /* Code alignment factor */
29 .sleb128 -4 /* Data alignment factor */
30 .byte 8 /* Return address register column */
31 .uleb128 1 /* Augmentation value length */
32 .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
33 .byte 0x0c /* DW_CFA_def_cfa */
34 .uleb128 4
35 .uleb128 4
36 .byte 0x88 /* DW_CFA_offset, column 0x8 */
37 .uleb128 1
38 .align 4
39.LENDCIEDLSI:
40 .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
41.LSTARTFDEDLSI:
42 .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
43 .long .LSTART_vsyscall-. /* PC-relative start address */
44 .long .LEND_vsyscall-.LSTART_vsyscall
45 .uleb128 0
46 .align 4
47.LENDFDEDLSI:
48 .previous
49
50 /*
51 * Pad out the segment to match the size of the sysenter.S version.
52 */
53VDSO32_vsyscall_eh_frame_size = 0x40
54 .section .data,"aw",@progbits
55 .space VDSO32_vsyscall_eh_frame_size-(.LENDFDEDLSI-.LSTARTFRAMEDLSI), 0
56 .previous
diff --git a/arch/x86/entry/vdso/vdso32/syscall.S b/arch/x86/entry/vdso/vdso32/syscall.S
deleted file mode 100644
index 6b286bb5251c..000000000000
--- a/arch/x86/entry/vdso/vdso32/syscall.S
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * Code for the vDSO. This version uses the syscall instruction.
3 *
4 * First get the common code for the sigreturn entry points.
5 * This must come first.
6 */
7#define SYSCALL_ENTER_KERNEL syscall
8#include "sigreturn.S"
9
10#include <asm/segment.h>
11
12 .text
13 .globl __kernel_vsyscall
14 .type __kernel_vsyscall,@function
15 ALIGN
16__kernel_vsyscall:
17.LSTART_vsyscall:
18 push %ebp
19.Lpush_ebp:
20 movl %ecx, %ebp
21 syscall
22 movl %ebp, %ecx
23 popl %ebp
24.Lpop_ebp:
25 ret
26.LEND_vsyscall:
27 .size __kernel_vsyscall,.-.LSTART_vsyscall
28
29 .section .eh_frame,"a",@progbits
30.LSTARTFRAME:
31 .long .LENDCIE-.LSTARTCIE
32.LSTARTCIE:
33 .long 0 /* CIE ID */
34 .byte 1 /* Version number */
35 .string "zR" /* NUL-terminated augmentation string */
36 .uleb128 1 /* Code alignment factor */
37 .sleb128 -4 /* Data alignment factor */
38 .byte 8 /* Return address register column */
39 .uleb128 1 /* Augmentation value length */
40 .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
41 .byte 0x0c /* DW_CFA_def_cfa */
42 .uleb128 4
43 .uleb128 4
44 .byte 0x88 /* DW_CFA_offset, column 0x8 */
45 .uleb128 1
46 .align 4
47.LENDCIE:
48
49 .long .LENDFDE1-.LSTARTFDE1 /* Length FDE */
50.LSTARTFDE1:
51 .long .LSTARTFDE1-.LSTARTFRAME /* CIE pointer */
52 .long .LSTART_vsyscall-. /* PC-relative start address */
53 .long .LEND_vsyscall-.LSTART_vsyscall
54 .uleb128 0 /* Augmentation length */
55 /* What follows are the instructions for the table generation.
56 We have to record all changes of the stack pointer. */
57 .byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
58 .byte 0x0e /* DW_CFA_def_cfa_offset */
59 .uleb128 8
60 .byte 0x85, 0x02 /* DW_CFA_offset %ebp -8 */
61 .byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
62 .byte 0xc5 /* DW_CFA_restore %ebp */
63 .byte 0x0e /* DW_CFA_def_cfa_offset */
64 .uleb128 4
65 .align 4
66.LENDFDE1:
67 .previous
68
69 /*
70 * Pad out the segment to match the size of the sysenter.S version.
71 */
72VDSO32_vsyscall_eh_frame_size = 0x40
73 .section .data,"aw",@progbits
74 .space VDSO32_vsyscall_eh_frame_size-(.LENDFDE1-.LSTARTFRAME), 0
75 .previous
diff --git a/arch/x86/entry/vdso/vdso32/sysenter.S b/arch/x86/entry/vdso/vdso32/sysenter.S
deleted file mode 100644
index e354bceee0e0..000000000000
--- a/arch/x86/entry/vdso/vdso32/sysenter.S
+++ /dev/null
@@ -1,116 +0,0 @@
1/*
2 * Code for the vDSO. This version uses the sysenter instruction.
3 *
4 * First get the common code for the sigreturn entry points.
5 * This must come first.
6 */
7#include "sigreturn.S"
8
9/*
10 * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
11 * %ecx itself for arg2. The pushing is because the sysexit instruction
12 * (found in entry.S) requires that we clobber %ecx with the desired %esp.
13 * User code might expect that %ecx is unclobbered though, as it would be
14 * for returning via the iret instruction, so we must push and pop.
15 *
16 * The caller puts arg3 in %edx, which the sysexit instruction requires
17 * for %eip. Thus, exactly as for arg2, we must push and pop.
18 *
19 * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
20 * instruction clobbers %esp, the user's %esp won't even survive entry
21 * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
22 * arg6 from the stack.
23 *
24 * You can not use this vsyscall for the clone() syscall because the
25 * three words on the parent stack do not get copied to the child.
26 */
27 .text
28 .globl __kernel_vsyscall
29 .type __kernel_vsyscall,@function
30 ALIGN
31__kernel_vsyscall:
32.LSTART_vsyscall:
33 push %ecx
34.Lpush_ecx:
35 push %edx
36.Lpush_edx:
37 push %ebp
38.Lenter_kernel:
39 movl %esp,%ebp
40 sysenter
41
42 /* 7: align return point with nop's to make disassembly easier */
43 .space 7,0x90
44
45 /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
46 int $0x80
47 /* 16: System call normal return point is here! */
48VDSO32_SYSENTER_RETURN: /* Symbol used by sysenter.c via vdso32-syms.h */
49 pop %ebp
50.Lpop_ebp:
51 pop %edx
52.Lpop_edx:
53 pop %ecx
54.Lpop_ecx:
55 ret
56.LEND_vsyscall:
57 .size __kernel_vsyscall,.-.LSTART_vsyscall
58 .previous
59
60 .section .eh_frame,"a",@progbits
61.LSTARTFRAMEDLSI:
62 .long .LENDCIEDLSI-.LSTARTCIEDLSI
63.LSTARTCIEDLSI:
64 .long 0 /* CIE ID */
65 .byte 1 /* Version number */
66 .string "zR" /* NUL-terminated augmentation string */
67 .uleb128 1 /* Code alignment factor */
68 .sleb128 -4 /* Data alignment factor */
69 .byte 8 /* Return address register column */
70 .uleb128 1 /* Augmentation value length */
71 .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
72 .byte 0x0c /* DW_CFA_def_cfa */
73 .uleb128 4
74 .uleb128 4
75 .byte 0x88 /* DW_CFA_offset, column 0x8 */
76 .uleb128 1
77 .align 4
78.LENDCIEDLSI:
79 .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
80.LSTARTFDEDLSI:
81 .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
82 .long .LSTART_vsyscall-. /* PC-relative start address */
83 .long .LEND_vsyscall-.LSTART_vsyscall
84 .uleb128 0
85 /* What follows are the instructions for the table generation.
86 We have to record all changes of the stack pointer. */
87 .byte 0x40 + (.Lpush_ecx-.LSTART_vsyscall) /* DW_CFA_advance_loc */
88 .byte 0x0e /* DW_CFA_def_cfa_offset */
89 .byte 0x08 /* RA at offset 8 now */
90 .byte 0x40 + (.Lpush_edx-.Lpush_ecx) /* DW_CFA_advance_loc */
91 .byte 0x0e /* DW_CFA_def_cfa_offset */
92 .byte 0x0c /* RA at offset 12 now */
93 .byte 0x40 + (.Lenter_kernel-.Lpush_edx) /* DW_CFA_advance_loc */
94 .byte 0x0e /* DW_CFA_def_cfa_offset */
95 .byte 0x10 /* RA at offset 16 now */
96 .byte 0x85, 0x04 /* DW_CFA_offset %ebp -16 */
97 /* Finally the epilogue. */
98 .byte 0x40 + (.Lpop_ebp-.Lenter_kernel) /* DW_CFA_advance_loc */
99 .byte 0x0e /* DW_CFA_def_cfa_offset */
100 .byte 0x0c /* RA at offset 12 now */
101 .byte 0xc5 /* DW_CFA_restore %ebp */
102 .byte 0x40 + (.Lpop_edx-.Lpop_ebp) /* DW_CFA_advance_loc */
103 .byte 0x0e /* DW_CFA_def_cfa_offset */
104 .byte 0x08 /* RA at offset 8 now */
105 .byte 0x40 + (.Lpop_ecx-.Lpop_edx) /* DW_CFA_advance_loc */
106 .byte 0x0e /* DW_CFA_def_cfa_offset */
107 .byte 0x04 /* RA at offset 4 now */
108 .align 4
109.LENDFDEDLSI:
110 .previous
111
112 /*
113 * Emit a symbol with the size of this .eh_frame data,
114 * to verify it matches the other versions.
115 */
116VDSO32_vsyscall_eh_frame_size = (.LENDFDEDLSI-.LSTARTFRAMEDLSI)
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
new file mode 100644
index 000000000000..93bd8452383f
--- /dev/null
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -0,0 +1,57 @@
+/*
+ * Code for the vDSO.  This version uses the old int $0x80 method.
+*/
+
+#include <asm/dwarf2.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
+
+/*
+ * First get the common code for the sigreturn entry points.
+ * This must come first.
+ */
+#include "sigreturn.S"
+
+	.text
+	.globl __kernel_vsyscall
+	.type __kernel_vsyscall,@function
+	ALIGN
+__kernel_vsyscall:
+	CFI_STARTPROC
+	/*
+	 * Reshuffle regs so that all of any of the entry instructions
+	 * will preserve enough state.
+	 */
+	pushl	%edx
+	CFI_ADJUST_CFA_OFFSET	4
+	CFI_REL_OFFSET		edx, 0
+	pushl	%ecx
+	CFI_ADJUST_CFA_OFFSET	4
+	CFI_REL_OFFSET		ecx, 0
+	movl	%esp, %ecx
+
+#ifdef CONFIG_X86_64
+	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
+	ALTERNATIVE_2 "", "sysenter", X86_FEATURE_SYSENTER32, \
+	                  "syscall",  X86_FEATURE_SYSCALL32
+#else
+	ALTERNATIVE "", "sysenter", X86_FEATURE_SEP
+#endif
+
+	/* Enter using int $0x80 */
+	movl	(%esp), %ecx
+	int	$0x80
+GLOBAL(int80_landing_pad)
+
+	/* Restore ECX and EDX in case they were clobbered. */
+	popl	%ecx
+	CFI_RESTORE		ecx
+	CFI_ADJUST_CFA_OFFSET	-4
+	popl	%edx
+	CFI_RESTORE		edx
+	CFI_ADJUST_CFA_OFFSET	-4
+	ret
+	CFI_ENDPROC
+
+	.size __kernel_vsyscall,.-__kernel_vsyscall
+	.previous
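This single image replaces the separate int80/sysenter/syscall vDSOs: ALTERNATIVE_2 (or ALTERNATIVE on 32-bit kernels) patches the entry at boot so __kernel_vsyscall uses SYSENTER or SYSCALL when the CPU supports it, with the int $0x80 path above as the fallback and landing pad. A hypothetical user-space sketch of calling through this entry (32-bit build assumed; AT_SYSINFO carries the __kernel_vsyscall address):

	#include <elf.h>
	#include <sys/auxv.h>

	/* Issue a 3-argument system call via the vDSO entry instead of a
	 * hand-coded "int $0x80"; eax holds the number, ebx/ecx/edx the args. */
	static long vdso_syscall3(long nr, long a1, long a2, long a3)
	{
		long ret;
		void *vsys = (void *)getauxval(AT_SYSINFO);

		asm volatile("call *%[vsys]"
			     : "=a" (ret)
			     : "a" (nr), "b" (a1), "c" (a2), "d" (a3),
			       [vsys] "rm" (vsys)
			     : "memory");
		return ret;
	}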
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 434543145d78..64df47148160 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -180,21 +180,10 @@ up_fail:
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static int load_vdso32(void)
 {
-	int ret;
-
 	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
 		return 0;
 
-	ret = map_vdso(selected_vdso32, false);
-	if (ret)
-		return ret;
-
-	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
-		current_thread_info()->sysenter_return =
-			current->mm->context.vdso +
-			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
-
-	return 0;
+	return map_vdso(&vdso_image_32, false);
 }
 #endif
 
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index b160c0c6baed..174c2549939d 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -38,7 +38,14 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
+#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
+	NATIVE;
+#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+	NONE;
+#else
+	EMULATE;
+#endif
 
 static int __init vsyscall_setup(char *str)
 {
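The default legacy-vsyscall mode now comes from Kconfig rather than being hard-wired to EMULATE; the vsyscall= parameter parsed just below still overrides it at boot. For example (assuming the Kconfig symbols introduced alongside this change):

	# .config fragment
	CONFIG_LEGACY_VSYSCALL_EMULATE=y
	# or, on the kernel command line:
	#   vsyscall=none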
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index a0a19b7ba22d..e6a5c275cd3f 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -289,7 +289,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
 		/* Return stub is in 32bit vsyscall page */
 		if (current->mm->context.vdso)
 			restorer = current->mm->context.vdso +
-				selected_vdso32->sym___kernel_sigreturn;
+				vdso_image_32.sym___kernel_sigreturn;
 		else
 			restorer = &frame->retcode;
 	}
@@ -368,7 +368,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
 			restorer = ksig->ka.sa.sa_restorer;
 		else
 			restorer = current->mm->context.vdso +
-				selected_vdso32->sym___kernel_rt_sigreturn;
+				vdso_image_32.sym___kernel_rt_sigreturn;
 		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
 
 		/*
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 3a45668f6dc3..94c18ebfd68c 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -32,6 +32,10 @@
 #include <asm/mpspec.h>
 #include <asm/realmode.h>
 
+#ifdef CONFIG_ACPI_APEI
+# include <asm/pgtable_types.h>
+#endif
+
 #ifdef CONFIG_ACPI
 extern int acpi_lapic;
 extern int acpi_ioapic;
@@ -147,4 +151,23 @@ extern int x86_acpi_numa_init(void);
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
 
+#ifdef CONFIG_ACPI_APEI
+static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+	/*
+	 * We currently have no way to look up the EFI memory map
+	 * attributes for a region in a consistent way, because the
+	 * memmap is discarded after efi_free_boot_services(). So if
+	 * you call efi_mem_attributes() during boot and at runtime,
+	 * you could theoretically see different attributes.
+	 *
+	 * Since we are yet to see any x86 platforms that require
+	 * anything other than PAGE_KERNEL (some arm64 platforms
+	 * require the equivalent of PAGE_KERNEL_NOCACHE), return that
+	 * until we know differently.
+	 */
+	return PAGE_KERNEL;
+}
+#endif
+
 #endif /* _ASM_X86_ACPI_H */
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 1a5da2e63aee..3c56ef1ae068 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -81,7 +81,7 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
 	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
 
-static inline u16 amd_get_node_id(struct pci_dev *pdev)
+static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
 {
 	struct pci_dev *misc;
 	int i;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ebf6d5e5668c..a30316bf801a 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -115,6 +115,59 @@ static inline bool apic_is_x2apic_enabled(void)
 	return msr & X2APIC_ENABLE;
 }
 
+extern void enable_IR_x2apic(void);
+
+extern int get_physical_broadcast(void);
+
+extern int lapic_get_maxlvt(void);
+extern void clear_local_APIC(void);
+extern void disconnect_bsp_APIC(int virt_wire_setup);
+extern void disable_local_APIC(void);
+extern void lapic_shutdown(void);
+extern void sync_Arb_IDs(void);
+extern void init_bsp_APIC(void);
+extern void setup_local_APIC(void);
+extern void init_apic_mappings(void);
+void register_lapic_address(unsigned long address);
+extern void setup_boot_APIC_clock(void);
+extern void setup_secondary_APIC_clock(void);
+extern int APIC_init_uniprocessor(void);
+
+#ifdef CONFIG_X86_64
+static inline int apic_force_enable(unsigned long addr)
+{
+	return -1;
+}
+#else
+extern int apic_force_enable(unsigned long addr);
+#endif
+
+extern int apic_bsp_setup(bool upmode);
+extern void apic_ap_setup(void);
+
+/*
+ * On 32bit this is mach-xxx local
+ */
+#ifdef CONFIG_X86_64
+extern int apic_is_clustered_box(void);
+#else
+static inline int apic_is_clustered_box(void)
+{
+	return 0;
+}
+#endif
+
+extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+
+#else /* !CONFIG_X86_LOCAL_APIC */
+static inline void lapic_shutdown(void) { }
+#define local_apic_timer_c2_ok		1
+static inline void init_apic_mappings(void) { }
+static inline void disable_local_APIC(void) { }
+# define setup_boot_APIC_clock x86_init_noop
+# define setup_secondary_APIC_clock x86_init_noop
+#endif /* !CONFIG_X86_LOCAL_APIC */
+
 #ifdef CONFIG_X86_X2APIC
 /*
  * Make previous memory operations globally visible before
@@ -186,67 +239,14 @@ static inline int x2apic_enabled(void)
 }
 
 #define x2apic_supported()	(cpu_has_x2apic)
-#else
+#else /* !CONFIG_X86_X2APIC */
 static inline void check_x2apic(void) { }
 static inline void x2apic_setup(void) { }
 static inline int x2apic_enabled(void) { return 0; }
 
 #define x2apic_mode		(0)
 #define x2apic_supported()	(0)
-#endif
-
-extern void enable_IR_x2apic(void);
-
-extern int get_physical_broadcast(void);
-
-extern int lapic_get_maxlvt(void);
-extern void clear_local_APIC(void);
-extern void disconnect_bsp_APIC(int virt_wire_setup);
-extern void disable_local_APIC(void);
-extern void lapic_shutdown(void);
-extern void sync_Arb_IDs(void);
-extern void init_bsp_APIC(void);
-extern void setup_local_APIC(void);
-extern void init_apic_mappings(void);
-void register_lapic_address(unsigned long address);
-extern void setup_boot_APIC_clock(void);
-extern void setup_secondary_APIC_clock(void);
-extern int APIC_init_uniprocessor(void);
-
-#ifdef CONFIG_X86_64
-static inline int apic_force_enable(unsigned long addr)
-{
-	return -1;
-}
-#else
-extern int apic_force_enable(unsigned long addr);
-#endif
-
-extern int apic_bsp_setup(bool upmode);
-extern void apic_ap_setup(void);
-
-/*
- * On 32bit this is mach-xxx local
- */
-#ifdef CONFIG_X86_64
-extern int apic_is_clustered_box(void);
-#else
-static inline int apic_is_clustered_box(void)
-{
-	return 0;
-}
-#endif
-
-extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
-
-#else /* !CONFIG_X86_LOCAL_APIC */
-static inline void lapic_shutdown(void) { }
-#define local_apic_timer_c2_ok		1
-static inline void init_apic_mappings(void) { }
-static inline void disable_local_APIC(void) { }
-# define setup_boot_APIC_clock x86_init_noop
-# define setup_secondary_APIC_clock x86_init_noop
-#endif /* !CONFIG_X86_LOCAL_APIC */
+#endif /* !CONFIG_X86_X2APIC */
 
 #ifdef CONFIG_X86_64
 #define SET_APIC_ID(x)	(apic->set_apic_id(x))
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index fb52aa644aab..ae5fb83e6d91 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -24,7 +24,7 @@
  */
 static __always_inline int atomic_read(const atomic_t *v)
 {
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
 /**
@@ -36,7 +36,7 @@ static __always_inline int atomic_read(const atomic_t *v)
  */
 static __always_inline void atomic_set(atomic_t *v, int i)
 {
-	v->counter = i;
+	WRITE_ONCE(v->counter, i);
 }
 
 /**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 50e33eff58de..037351022f54 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
  */
 static inline long atomic64_read(const atomic64_t *v)
 {
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
 /**
@@ -30,7 +30,7 @@ static inline long atomic64_read(const atomic64_t *v)
  */
 static inline void atomic64_set(atomic64_t *v, long i)
 {
-	v->counter = i;
+	WRITE_ONCE(v->counter, i);
 }
 
 /**
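Switching the plain load/store to READ_ONCE()/WRITE_ONCE() keeps the compiler from tearing, refetching, or eliding the access to ->counter. A simplified model of what those helpers provide (the kernel's real definitions in <linux/compiler.h> are more elaborate; this is only the core idea):

	#define __READ_ONCE_SKETCH(x)		(*(const volatile typeof(x) *)&(x))
	#define __WRITE_ONCE_SKETCH(x, val)	(*(volatile typeof(x) *)&(x) = (val))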
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
new file mode 100644
index 000000000000..b7a1ab865d68
--- /dev/null
+++ b/arch/x86/include/asm/dwarf2.h
@@ -0,0 +1,84 @@
1#ifndef _ASM_X86_DWARF2_H
2#define _ASM_X86_DWARF2_H
3
4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files"
6#endif
7
8/*
9 * Macros for dwarf2 CFI unwind table entries.
10 * See "as.info" for details on these pseudo ops. Unfortunately
11 * they are only supported in very new binutils, so define them
12 * away for older version.
13 */
14
15#ifdef CONFIG_AS_CFI
16
17#define CFI_STARTPROC .cfi_startproc
18#define CFI_ENDPROC .cfi_endproc
19#define CFI_DEF_CFA .cfi_def_cfa
20#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
21#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
22#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
23#define CFI_OFFSET .cfi_offset
24#define CFI_REL_OFFSET .cfi_rel_offset
25#define CFI_REGISTER .cfi_register
26#define CFI_RESTORE .cfi_restore
27#define CFI_REMEMBER_STATE .cfi_remember_state
28#define CFI_RESTORE_STATE .cfi_restore_state
29#define CFI_UNDEFINED .cfi_undefined
30#define CFI_ESCAPE .cfi_escape
31
32#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
33#define CFI_SIGNAL_FRAME .cfi_signal_frame
34#else
35#define CFI_SIGNAL_FRAME
36#endif
37
38#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
39#ifndef BUILD_VDSO
40 /*
41 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
42 * The latter we currently just discard since we don't do DWARF
43 * unwinding at runtime. So only the offline DWARF information is
44 * useful to anyone. Note we should not use this directive if
45 * vmlinux.lds.S gets changed so it doesn't discard .eh_frame.
46 */
47 .cfi_sections .debug_frame
48#else
49 /*
50 * For the vDSO, emit both runtime unwind information and debug
51 * symbols for the .dbg file.
52 */
53 .cfi_sections .eh_frame, .debug_frame
54#endif
55#endif
56
57#else
58
59/*
60 * Due to the structure of pre-exisiting code, don't use assembler line
61 * comment character # to ignore the arguments. Instead, use a dummy macro.
62 */
63.macro cfi_ignore a=0, b=0, c=0, d=0
64.endm
65
66#define CFI_STARTPROC cfi_ignore
67#define CFI_ENDPROC cfi_ignore
68#define CFI_DEF_CFA cfi_ignore
69#define CFI_DEF_CFA_REGISTER cfi_ignore
70#define CFI_DEF_CFA_OFFSET cfi_ignore
71#define CFI_ADJUST_CFA_OFFSET cfi_ignore
72#define CFI_OFFSET cfi_ignore
73#define CFI_REL_OFFSET cfi_ignore
74#define CFI_REGISTER cfi_ignore
75#define CFI_RESTORE cfi_ignore
76#define CFI_REMEMBER_STATE cfi_ignore
77#define CFI_RESTORE_STATE cfi_ignore
78#define CFI_UNDEFINED cfi_ignore
79#define CFI_ESCAPE cfi_ignore
80#define CFI_SIGNAL_FRAME cfi_ignore
81
82#endif
83
84#endif /* _ASM_X86_DWARF2_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index ae68be92f755..0010c78c4998 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -105,6 +105,7 @@ extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int __init efi_memblock_x86_reserve_range(void);
 extern pgd_t * __init efi_call_phys_prolog(void);
 extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
+extern void __init efi_print_memmap(void);
 extern void __init efi_unmap_memmap(void);
 extern void __init efi_memory_uc(u64 addr, unsigned long size);
 extern void __init efi_map_region(efi_memory_desc_t *md);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 141c561f4664..1514753fd435 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -171,11 +171,11 @@ do { \
 static inline void elf_common_init(struct thread_struct *t,
 				   struct pt_regs *regs, const u16 ds)
 {
-	/* Commented-out registers are cleared in stub_execve */
-	/*regs->ax = regs->bx =*/ regs->cx = regs->dx = 0;
-	regs->si = regs->di /*= regs->bp*/ = 0;
+	/* ax gets execve's return value. */
+	/*regs->ax = */ regs->bx = regs->cx = regs->dx = 0;
+	regs->si = regs->di = regs->bp = 0;
 	regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
-	/*regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;*/
+	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
 	t->fs = t->gs = 0;
 	t->fsindex = t->gsindex = 0;
 	t->ds = t->es = ds;
@@ -328,7 +328,7 @@ else \
 
 #define VDSO_ENTRY							\
 	((unsigned long)current->mm->context.vdso +			\
-	 selected_vdso32->sym___kernel_vsyscall)
+	 vdso_image_32.sym___kernel_vsyscall)
 
 struct linux_binprm;
 
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 5fa9fb0f8809..cc285ec4b2c1 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -63,10 +63,10 @@
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
-extern int boot_hpet_disable;
+extern bool boot_hpet_disable;
 extern u8 hpet_blockid;
-extern int hpet_force_user;
-extern u8 hpet_msi_disable;
+extern bool hpet_force_user;
+extern bool hpet_msi_disable;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index b130d59406fb..e5f5dc9787d5 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -29,11 +29,5 @@ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
 extern void __show_regs(struct pt_regs *regs, int all);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
-#ifdef CONFIG_KEXEC_CORE
-extern int in_crash_kexec;
-#else
-/* no crash dump is ever in progress if no crash kernel can be kexec'd */
-#define in_crash_kexec 0
-#endif
 
 #endif /* _ASM_X86_KDEBUG_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 2dbc0bf2b9f3..2ea4527e462f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -123,19 +123,27 @@ struct mca_config {
 };
 
 struct mce_vendor_flags {
 	/*
-	 * overflow recovery cpuid bit indicates that overflow
-	 * conditions are not fatal
-	 */
-	__u64 overflow_recov	: 1,
-
-	/*
-	 * SUCCOR stands for S/W UnCorrectable error COntainment
-	 * and Recovery. It indicates support for data poisoning
-	 * in HW and deferred error interrupts.
-	 */
-	      succor		: 1,
-	__reserved_0	: 62;
+	 * Indicates that overflow conditions are not fatal, when set.
+	 */
+	__u64 overflow_recov	: 1,
+
+	/*
+	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
+	 * Recovery. It indicates support for data poisoning in HW and deferred
+	 * error interrupts.
+	 */
+	      succor		: 1,
+
+	/*
+	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
+	 * the register space for each MCA bank and also increases number of
+	 * banks. Also, to accommodate the new banks and registers, the MCA
+	 * register space is moved to a new MSR range.
+	 */
+	      smca		: 1,
+
+	__reserved_0	: 61;
 };
 extern struct mce_vendor_flags mce_flags;
 
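The new smca bit shrinks the reserved field from 62 to 61 bits. Vendor flags like these are typically filled in from CPUID during AMD MCE init, roughly as in the sketch below (the leaf and bit positions shown are illustrative placeholders, not taken from this diff):

	if (c->x86_vendor == X86_VENDOR_AMD) {
		u32 ebx = cpuid_ebx(0x80000007);	/* placeholder leaf */

		mce_flags.overflow_recov = !!(ebx & BIT(0));
		mce_flags.succor	 = !!(ebx & BIT(1));
		mce_flags.smca		 = !!(ebx & BIT(3));
	}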
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 9e6278c7140e..34e62b1dcfce 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -27,7 +27,6 @@ struct cpu_signature {
 struct device;
 
 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
-extern bool dis_ucode_ldr;
 
 struct microcode_ops {
 	enum ucode_state (*request_microcode_user) (int cpu,
@@ -55,6 +54,12 @@ struct ucode_cpu_info {
 };
 extern struct ucode_cpu_info ucode_cpu_info[];
 
+#ifdef CONFIG_MICROCODE
+int __init microcode_init(void);
+#else
+static inline int __init microcode_init(void)	{ return 0; };
+#endif
+
 #ifdef CONFIG_MICROCODE_INTEL
 extern struct microcode_ops * __init init_intel_microcode(void);
 #else
@@ -75,7 +80,6 @@ static inline struct microcode_ops * __init init_amd_microcode(void)
 static inline void __exit exit_amd_microcode(void) {}
 #endif
 
-#ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 
 #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
@@ -150,22 +154,18 @@ static inline unsigned int x86_model(unsigned int sig)
 	return model;
 }
 
+#ifdef CONFIG_MICROCODE
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 #else
-static inline void __init load_ucode_bsp(void) {}
-static inline void load_ucode_ap(void) {}
-static inline int __init save_microcode_in_initrd(void)
-{
-	return 0;
-}
-static inline void reload_early_microcode(void) {}
-static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name)
-{
-	return false;
-}
+static inline void __init load_ucode_bsp(void)			{ }
+static inline void load_ucode_ap(void)				{ }
+static inline int __init save_microcode_in_initrd(void)	{ return 0; }
+static inline void reload_early_microcode(void)		{ }
+static inline bool
+get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
 #endif
 #endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index ac6d328977a6..adfc847a395e 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -64,7 +64,7 @@ extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, s
 #define PATCH_MAX_SIZE PAGE_SIZE
 extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
 
-#ifdef CONFIG_MICROCODE_AMD_EARLY
+#ifdef CONFIG_MICROCODE_AMD
 extern void __init load_ucode_amd_bsp(unsigned int family);
 extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
@@ -76,4 +76,5 @@ static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
 void reload_ucode_amd(void) {}
 #endif
 
+extern bool check_current_patch_level(u32 *rev, bool early);
 #endif /* _ASM_X86_MICROCODE_AMD_H */
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 7991c606125d..8559b0102ea1 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -57,7 +57,7 @@ extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
 extern int microcode_sanity_check(void *mc, int print_err);
 extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
 
-#ifdef CONFIG_MICROCODE_INTEL_EARLY
+#ifdef CONFIG_MICROCODE_INTEL
 extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
@@ -71,13 +71,9 @@ static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL;
 static inline void reload_ucode_intel(void) {}
 #endif
 
-#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+#ifdef CONFIG_HOTPLUG_CPU
 extern int save_mc_for_early(u8 *mc);
 #else
-static inline int save_mc_for_early(u8 *mc)
-{
-	return 0;
-}
+static inline int save_mc_for_early(u8 *mc) { return 0; }
 #endif
-
 #endif /* _ASM_X86_MICROCODE_INTEL_H */
diff --git a/arch/x86/include/asm/numachip/numachip.h b/arch/x86/include/asm/numachip/numachip.h
index 1c6f7f6212c1..c64373a2d731 100644
--- a/arch/x86/include/asm/numachip/numachip.h
+++ b/arch/x86/include/asm/numachip/numachip.h
@@ -14,6 +14,7 @@
 #ifndef _ASM_X86_NUMACHIP_NUMACHIP_H
 #define _ASM_X86_NUMACHIP_NUMACHIP_H
 
+extern u8 numachip_system;
 extern int __init pci_numachip_init(void);
 
 #endif /* _ASM_X86_NUMACHIP_NUMACHIP_H */
diff --git a/arch/x86/include/asm/numachip/numachip_csr.h b/arch/x86/include/asm/numachip/numachip_csr.h
index 660f843df928..29719eecdc2e 100644
--- a/arch/x86/include/asm/numachip/numachip_csr.h
+++ b/arch/x86/include/asm/numachip/numachip_csr.h
@@ -14,12 +14,8 @@
14#ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H 14#ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
15#define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H 15#define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
16 16
17#include <linux/numa.h> 17#include <linux/smp.h>
18#include <linux/percpu.h>
19#include <linux/io.h> 18#include <linux/io.h>
20#include <linux/swab.h>
21#include <asm/types.h>
22#include <asm/processor.h>
23 19
24#define CSR_NODE_SHIFT 16 20#define CSR_NODE_SHIFT 16
25#define CSR_NODE_BITS(p) (((unsigned long)(p)) << CSR_NODE_SHIFT) 21#define CSR_NODE_BITS(p) (((unsigned long)(p)) << CSR_NODE_SHIFT)
@@ -27,11 +23,8 @@
27 23
28/* 32K CSR space, b15 indicates geo/non-geo */ 24/* 32K CSR space, b15 indicates geo/non-geo */
29#define CSR_OFFSET_MASK 0x7fffUL 25#define CSR_OFFSET_MASK 0x7fffUL
30 26#define CSR_G0_NODE_IDS (0x008 + (0 << 12))
31/* Global CSR space covers all 4K possible nodes with 64K CSR space per node */ 27#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12))
32#define NUMACHIP_GCSR_BASE 0x3fff00000000ULL
33#define NUMACHIP_GCSR_LIM 0x3fff0fffffffULL
34#define NUMACHIP_GCSR_SIZE (NUMACHIP_GCSR_LIM - NUMACHIP_GCSR_BASE + 1)
35 28
36/* 29/*
37 * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however 30 * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however
@@ -41,12 +34,7 @@
41#define NUMACHIP_LCSR_BASE 0x3ffffe000000ULL 34#define NUMACHIP_LCSR_BASE 0x3ffffe000000ULL
42#define NUMACHIP_LCSR_LIM 0x3fffffffffffULL 35#define NUMACHIP_LCSR_LIM 0x3fffffffffffULL
43#define NUMACHIP_LCSR_SIZE (NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1) 36#define NUMACHIP_LCSR_SIZE (NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1)
44 37#define NUMACHIP_LAPIC_BITS 8
45static inline void *gcsr_address(int node, unsigned long offset)
46{
47 return __va(NUMACHIP_GCSR_BASE | (1UL << 15) |
48 CSR_NODE_BITS(node & CSR_NODE_MASK) | (offset & CSR_OFFSET_MASK));
49}
50 38
51static inline void *lcsr_address(unsigned long offset) 39static inline void *lcsr_address(unsigned long offset)
52{ 40{
@@ -54,114 +42,57 @@ static inline void *lcsr_address(unsigned long offset)
54 CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK)); 42 CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK));
55} 43}
56 44
57static inline unsigned int read_gcsr(int node, unsigned long offset) 45static inline unsigned int read_lcsr(unsigned long offset)
58{ 46{
59 return swab32(readl(gcsr_address(node, offset))); 47 return swab32(readl(lcsr_address(offset)));
60} 48}
61 49
62static inline void write_gcsr(int node, unsigned long offset, unsigned int val) 50static inline void write_lcsr(unsigned long offset, unsigned int val)
63{ 51{
64 writel(swab32(val), gcsr_address(node, offset)); 52 writel(swab32(val), lcsr_address(offset));
65} 53}
66 54
67static inline unsigned int read_lcsr(unsigned long offset) 55/*
56 * On NumaChip2, local CSR space is 16MB and starts at fixed offset below 4G
57 */
58
59#define NUMACHIP2_LCSR_BASE 0xf0000000UL
60#define NUMACHIP2_LCSR_SIZE 0x1000000UL
61#define NUMACHIP2_APIC_ICR 0x100000
62#define NUMACHIP2_TIMER_DEADLINE 0x200000
63#define NUMACHIP2_TIMER_INT 0x200008
64#define NUMACHIP2_TIMER_NOW 0x200018
65#define NUMACHIP2_TIMER_RESET 0x200020
66
67static inline void __iomem *numachip2_lcsr_address(unsigned long offset)
68{ 68{
69 return swab32(readl(lcsr_address(offset))); 69 return (void __iomem *)__va(NUMACHIP2_LCSR_BASE |
70 (offset & (NUMACHIP2_LCSR_SIZE - 1)));
70} 71}
71 72
72static inline void write_lcsr(unsigned long offset, unsigned int val) 73static inline u32 numachip2_read32_lcsr(unsigned long offset)
73{ 74{
74 writel(swab32(val), lcsr_address(offset)); 75 return readl(numachip2_lcsr_address(offset));
75} 76}
76 77
77/* ========================================================================= */ 78static inline u64 numachip2_read64_lcsr(unsigned long offset)
78/* CSR_G0_STATE_CLEAR */ 79{
79/* ========================================================================= */ 80 return readq(numachip2_lcsr_address(offset));
80 81}
81#define CSR_G0_STATE_CLEAR (0x000 + (0 << 12))
82union numachip_csr_g0_state_clear {
83 unsigned int v;
84 struct numachip_csr_g0_state_clear_s {
85 unsigned int _state:2;
86 unsigned int _rsvd_2_6:5;
87 unsigned int _lost:1;
88 unsigned int _rsvd_8_31:24;
89 } s;
90};
91
92/* ========================================================================= */
93/* CSR_G0_NODE_IDS */
94/* ========================================================================= */
95 82
96#define CSR_G0_NODE_IDS (0x008 + (0 << 12)) 83static inline void numachip2_write32_lcsr(unsigned long offset, u32 val)
97union numachip_csr_g0_node_ids { 84{
98 unsigned int v; 85 writel(val, numachip2_lcsr_address(offset));
99 struct numachip_csr_g0_node_ids_s { 86}
100 unsigned int _initialid:16;
101 unsigned int _nodeid:12;
102 unsigned int _rsvd_28_31:4;
103 } s;
104};
105
106/* ========================================================================= */
107/* CSR_G3_EXT_IRQ_GEN */
108/* ========================================================================= */
109 87
110#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12)) 88static inline void numachip2_write64_lcsr(unsigned long offset, u64 val)
111union numachip_csr_g3_ext_irq_gen { 89{
112 unsigned int v; 90 writeq(val, numachip2_lcsr_address(offset));
113 struct numachip_csr_g3_ext_irq_gen_s { 91}
114 unsigned int _vector:8;
115 unsigned int _msgtype:3;
116 unsigned int _index:5;
117 unsigned int _destination_apic_id:16;
118 } s;
119};
120
121/* ========================================================================= */
122/* CSR_G3_EXT_IRQ_STATUS */
123/* ========================================================================= */
124
125#define CSR_G3_EXT_IRQ_STATUS (0x034 + (3 << 12))
126union numachip_csr_g3_ext_irq_status {
127 unsigned int v;
128 struct numachip_csr_g3_ext_irq_status_s {
129 unsigned int _result:32;
130 } s;
131};
132
133/* ========================================================================= */
134/* CSR_G3_EXT_IRQ_DEST */
135/* ========================================================================= */
136
137#define CSR_G3_EXT_IRQ_DEST (0x038 + (3 << 12))
138union numachip_csr_g3_ext_irq_dest {
139 unsigned int v;
140 struct numachip_csr_g3_ext_irq_dest_s {
141 unsigned int _irq:8;
142 unsigned int _rsvd_8_31:24;
143 } s;
144};
145
146/* ========================================================================= */
147/* CSR_G3_NC_ATT_MAP_SELECT */
148/* ========================================================================= */
149
150#define CSR_G3_NC_ATT_MAP_SELECT (0x7fc + (3 << 12))
151union numachip_csr_g3_nc_att_map_select {
152 unsigned int v;
153 struct numachip_csr_g3_nc_att_map_select_s {
154 unsigned int _upper_address_bits:4;
155 unsigned int _select_ram:4;
156 unsigned int _rsvd_8_31:24;
157 } s;
158};
159
160/* ========================================================================= */
161/* CSR_G3_NC_ATT_MAP_SELECT_0-255 */
162/* ========================================================================= */
163
164#define CSR_G3_NC_ATT_MAP_SELECT_0 (0x800 + (3 << 12))
165 92
166#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */ 93static inline unsigned int numachip2_timer(void)
94{
95 return (smp_processor_id() % 48) << 6;
96}
167 97
98#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index b12f81022a6b..01bcde84d3e4 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -30,12 +30,9 @@ static __always_inline void preempt_count_set(int pc)
 /*
  * must be macros to avoid header recursion hell
  */
-#define init_task_preempt_count(p) do { \
-	task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
-} while (0)
+#define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
 	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
 } while (0)
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 19577dd325fa..b55f30960554 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -556,12 +556,12 @@ static inline unsigned int cpuid_edx(unsigned int op)
 }
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
+static __always_inline void rep_nop(void)
 {
 	asm volatile("rep; nop" ::: "memory");
 }
 
-static inline void cpu_relax(void)
+static __always_inline void cpu_relax(void)
 {
 	rep_nop();
 }
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index e4661196994e..ff8b9a17dc4b 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,12 +27,11 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+extern void *memcpy(void *to, const void *from, size_t len);
 extern void *__memcpy(void *to, const void *from, size_t len);
 
 #ifndef CONFIG_KMEMCHECK
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-extern void *memcpy(void *to, const void *from, size_t len);
-#else
+#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
 #define memcpy(dst, src, len)					\
 ({								\
 	size_t __len = (len);					\
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index d7f3b3b78ac3..751bf4b7bf11 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
 
 #define __EXTRA_CLOBBER  \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15"
+	  "r12", "r13", "r14", "r15", "flags"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary					\
@@ -100,7 +100,11 @@ do { \
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
-/* Save restore flags to clear handle leaking NT */
+/*
+ * There is no need to save or restore flags, because flags are always
+ * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
+ * has no effect.
+ */
 #define switch_to(prev, next, last)					\
 	asm volatile(SAVE_CONTEXT					\
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d6a756ae04c8..999b7cd2e78c 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -20,9 +20,21 @@
 #include <asm/thread_info.h>	/* for TS_COMPAT */
 #include <asm/unistd.h>
 
-typedef void (*sys_call_ptr_t)(void);
+typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
+					  unsigned long, unsigned long,
+					  unsigned long, unsigned long);
 extern const sys_call_ptr_t sys_call_table[];
 
+#if defined(CONFIG_X86_32)
+#define ia32_sys_call_table sys_call_table
+#define __NR_syscall_compat_max __NR_syscall_max
+#define IA32_NR_syscalls NR_syscalls
+#endif
+
+#if defined(CONFIG_IA32_EMULATION)
+extern const sys_call_ptr_t ia32_sys_call_table[];
+#endif
+
 /*
  * Only the low 32 bits of orig_ax are meaningful, so we return int.
  * This importantly ignores the high bits on 64-bit, so comparisons
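Giving sys_call_ptr_t a real prototype (six unsigned long arguments, long return) lets C entry code call through the table directly instead of going via untyped stubs. A sketch of such a dispatch using the symbols declared above (the function itself is illustrative, not part of this diff; argument register order follows the 32-bit ABI):

	static void sketch_do_syscall_32(struct pt_regs *regs)
	{
		unsigned int nr = (unsigned int)regs->orig_ax;

		if (nr < IA32_NR_syscalls)
			regs->ax = ia32_sys_call_table[nr](
				(unsigned int)regs->bx, (unsigned int)regs->cx,
				(unsigned int)regs->dx, (unsigned int)regs->si,
				(unsigned int)regs->di, (unsigned int)regs->bp);
	}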
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 8afdc3e44247..c7b551028740 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -57,9 +57,7 @@ struct thread_info {
 	__u32			flags;		/* low level flags */
 	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;		/* current CPU */
-	int			saved_preempt_count;
 	mm_segment_t		addr_limit;
-	void __user		*sysenter_return;
 	unsigned int		sig_on_uaccess_error:1;
 	unsigned int		uaccess_err:1;	/* uaccess failed */
 };
@@ -69,7 +67,6 @@ struct thread_info {
 	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.saved_preempt_count = INIT_PREEMPT_COUNT,	\
 	.addr_limit	= KERNEL_DS,		\
 }
 
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a8df874f3e88..09b1b0ab94b7 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -51,13 +51,13 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
51 * limit, not add it to the address). 51 * limit, not add it to the address).
52 */ 52 */
53 if (__builtin_constant_p(size)) 53 if (__builtin_constant_p(size))
54 return addr > limit - size; 54 return unlikely(addr > limit - size);
55 55
56 /* Arbitrary sizes? Be careful about overflow */ 56 /* Arbitrary sizes? Be careful about overflow */
57 addr += size; 57 addr += size;
58 if (addr < size) 58 if (unlikely(addr < size))
59 return true; 59 return true;
60 return addr > limit; 60 return unlikely(addr > limit);
61} 61}
62 62
63#define __range_not_ok(addr, size, limit) \ 63#define __range_not_ok(addr, size, limit) \
@@ -182,7 +182,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
182 : "=a" (__ret_gu), "=r" (__val_gu) \ 182 : "=a" (__ret_gu), "=r" (__val_gu) \
183 : "0" (ptr), "i" (sizeof(*(ptr)))); \ 183 : "0" (ptr), "i" (sizeof(*(ptr)))); \
184 (x) = (__force __typeof__(*(ptr))) __val_gu; \ 184 (x) = (__force __typeof__(*(ptr))) __val_gu; \
185 __ret_gu; \ 185 __builtin_expect(__ret_gu, 0); \
186}) 186})
187 187
188#define __put_user_x(size, x, ptr, __ret_pu) \ 188#define __put_user_x(size, x, ptr, __ret_pu) \
@@ -278,7 +278,7 @@ extern void __put_user_8(void);
278 __put_user_x(X, __pu_val, ptr, __ret_pu); \ 278 __put_user_x(X, __pu_val, ptr, __ret_pu); \
279 break; \ 279 break; \
280 } \ 280 } \
281 __ret_pu; \ 281 __builtin_expect(__ret_pu, 0); \
282}) 282})
283 283
284#define __put_user_size(x, ptr, size, retval, errret) \ 284#define __put_user_size(x, ptr, size, retval, errret) \
@@ -401,7 +401,7 @@ do { \
401({ \ 401({ \
402 int __pu_err; \ 402 int __pu_err; \
403 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ 403 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
404 __pu_err; \ 404 __builtin_expect(__pu_err, 0); \
405}) 405})
406 406
407#define __get_user_nocheck(x, ptr, size) \ 407#define __get_user_nocheck(x, ptr, size) \
@@ -410,7 +410,7 @@ do { \
410 unsigned long __gu_val; \ 410 unsigned long __gu_val; \
411 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ 411 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
412 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 412 (x) = (__force __typeof__(*(ptr)))__gu_val; \
413 __gu_err; \ 413 __builtin_expect(__gu_err, 0); \
414}) 414})
415 415
416/* FIXME: this hack is definitely wrong -AK */ 416/* FIXME: this hack is definitely wrong -AK */
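
The unlikely()/__builtin_expect(..., 0) annotations added above tell the compiler which value the expression usually has, so the success path can be laid out as straight-line code. A minimal, self-contained sketch of the same macros outside the kernel (assuming GCC or clang):

/*
 * Stand-alone illustration: error paths are annotated as the rare case,
 * mirroring the __builtin_expect(err, 0) pattern in the hunk above.
 */
#include <stdio.h>
#include <stdlib.h>

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)

static int parse_positive(const char *s, long *out)
{
        char *end;
        long val = strtol(s, &end, 10);

        /* The failure checks are hinted as unlikely. */
        if (unlikely(end == s || *end != '\0'))
                return -1;
        if (unlikely(val <= 0))
                return -1;

        *out = val;
        return 0;
}

int main(void)
{
        long v;

        if (parse_positive("42", &v) == 0)
                printf("parsed %ld\n", v);
        return 0;
}
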
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 8021bd28c0f1..756de9190aec 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -26,7 +26,7 @@ struct vdso_image {
26 long sym___kernel_sigreturn; 26 long sym___kernel_sigreturn;
27 long sym___kernel_rt_sigreturn; 27 long sym___kernel_rt_sigreturn;
28 long sym___kernel_vsyscall; 28 long sym___kernel_vsyscall;
29 long sym_VDSO32_SYSENTER_RETURN; 29 long sym_int80_landing_pad;
30}; 30};
31 31
32#ifdef CONFIG_X86_64 32#ifdef CONFIG_X86_64
@@ -38,13 +38,7 @@ extern const struct vdso_image vdso_image_x32;
38#endif 38#endif
39 39
40#if defined CONFIG_X86_32 || defined CONFIG_COMPAT 40#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
41extern const struct vdso_image vdso_image_32_int80; 41extern const struct vdso_image vdso_image_32;
42#ifdef CONFIG_COMPAT
43extern const struct vdso_image vdso_image_32_syscall;
44#endif
45extern const struct vdso_image vdso_image_32_sysenter;
46
47extern const struct vdso_image *selected_vdso32;
48#endif 42#endif
49 43
50extern void __init init_vdso_image(const struct vdso_image *image); 44extern void __init init_vdso_image(const struct vdso_image *image);
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 76880ede9a35..03429da2fa80 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -2,7 +2,7 @@
2#define _UAPI_ASM_X86_MCE_H 2#define _UAPI_ASM_X86_MCE_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/ioctls.h> 5#include <linux/ioctl.h>
6 6
7/* Fields are zero when not available */ 7/* Fields are zero when not available */
8struct mce { 8struct mce {
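
The include swap above presumably reflects that a UAPI header like this defines its ioctl commands with the _IOR()/_IOW() helper macros, which come from <linux/ioctl.h>, whereas <asm/ioctls.h> carries terminal ioctl numbers. A tiny sketch of what <linux/ioctl.h> provides, assuming Linux headers are installed (DEMO_GET_LEN is a made-up command, not one of the MCE ioctls):

#include <stdio.h>
#include <linux/ioctl.h>

/* Hypothetical read-only ioctl command encoding an int payload. */
#define DEMO_GET_LEN _IOR('M', 1, int)

int main(void)
{
        printf("DEMO_GET_LEN = %#lx\n", (unsigned long)DEMO_GET_LEN);
        return 0;
}
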
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 24e94ce454e2..2f69e3b184f6 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1431,7 +1431,7 @@ enum {
1431}; 1431};
1432static int x2apic_state; 1432static int x2apic_state;
1433 1433
1434static inline void __x2apic_disable(void) 1434static void __x2apic_disable(void)
1435{ 1435{
1436 u64 msr; 1436 u64 msr;
1437 1437
@@ -1447,7 +1447,7 @@ static inline void __x2apic_disable(void)
1447 printk_once(KERN_INFO "x2apic disabled\n"); 1447 printk_once(KERN_INFO "x2apic disabled\n");
1448} 1448}
1449 1449
1450static inline void __x2apic_enable(void) 1450static void __x2apic_enable(void)
1451{ 1451{
1452 u64 msr; 1452 u64 msr;
1453 1453
@@ -1807,7 +1807,7 @@ int apic_version[MAX_LOCAL_APIC];
1807/* 1807/*
1808 * This interrupt should _never_ happen with our APIC/SMP architecture 1808 * This interrupt should _never_ happen with our APIC/SMP architecture
1809 */ 1809 */
1810static inline void __smp_spurious_interrupt(u8 vector) 1810static void __smp_spurious_interrupt(u8 vector)
1811{ 1811{
1812 u32 v; 1812 u32 v;
1813 1813
@@ -1848,7 +1848,7 @@ __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
1848/* 1848/*
1849 * This interrupt should never happen with our APIC/SMP architecture 1849 * This interrupt should never happen with our APIC/SMP architecture
1850 */ 1850 */
1851static inline void __smp_error_interrupt(struct pt_regs *regs) 1851static void __smp_error_interrupt(struct pt_regs *regs)
1852{ 1852{
1853 u32 v; 1853 u32 v;
1854 u32 i = 0; 1854 u32 i = 0;
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index b548fd3b764b..38dd5efdd04c 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -11,30 +11,21 @@
11 * 11 *
12 */ 12 */
13 13
14#include <linux/errno.h>
15#include <linux/threads.h>
16#include <linux/cpumask.h>
17#include <linux/string.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/ctype.h>
21#include <linux/init.h> 14#include <linux/init.h>
22#include <linux/hardirq.h>
23#include <linux/delay.h>
24 15
25#include <asm/numachip/numachip.h> 16#include <asm/numachip/numachip.h>
26#include <asm/numachip/numachip_csr.h> 17#include <asm/numachip/numachip_csr.h>
27#include <asm/smp.h>
28#include <asm/apic.h>
29#include <asm/ipi.h> 18#include <asm/ipi.h>
30#include <asm/apic_flat_64.h> 19#include <asm/apic_flat_64.h>
31#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/pci_x86.h>
32 22
33static int numachip_system __read_mostly; 23u8 numachip_system __read_mostly;
24static const struct apic apic_numachip1;
25static const struct apic apic_numachip2;
26static void (*numachip_apic_icr_write)(int apicid, unsigned int val) __read_mostly;
34 27
35static const struct apic apic_numachip; 28static unsigned int numachip1_get_apic_id(unsigned long x)
36
37static unsigned int get_apic_id(unsigned long x)
38{ 29{
39 unsigned long value; 30 unsigned long value;
40 unsigned int id = (x >> 24) & 0xff; 31 unsigned int id = (x >> 24) & 0xff;
@@ -47,7 +38,7 @@ static unsigned int get_apic_id(unsigned long x)
47 return id; 38 return id;
48} 39}
49 40
50static unsigned long set_apic_id(unsigned int id) 41static unsigned long numachip1_set_apic_id(unsigned int id)
51{ 42{
52 unsigned long x; 43 unsigned long x;
53 44
@@ -55,9 +46,17 @@ static unsigned long set_apic_id(unsigned int id)
55 return x; 46 return x;
56} 47}
57 48
58static unsigned int read_xapic_id(void) 49static unsigned int numachip2_get_apic_id(unsigned long x)
50{
51 u64 mcfg;
52
53 rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, mcfg);
54 return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24);
55}
56
57static unsigned long numachip2_set_apic_id(unsigned int id)
59{ 58{
60 return get_apic_id(apic_read(APIC_ID)); 59 return id << 24;
61} 60}
62 61
63static int numachip_apic_id_valid(int apicid) 62static int numachip_apic_id_valid(int apicid)
@@ -68,7 +67,7 @@ static int numachip_apic_id_valid(int apicid)
68 67
69static int numachip_apic_id_registered(void) 68static int numachip_apic_id_registered(void)
70{ 69{
71 return physid_isset(read_xapic_id(), phys_cpu_present_map); 70 return 1;
72} 71}
73 72
74static int numachip_phys_pkg_id(int initial_apic_id, int index_msb) 73static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
@@ -76,36 +75,48 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
76 return initial_apic_id >> index_msb; 75 return initial_apic_id >> index_msb;
77} 76}
78 77
79static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) 78static void numachip1_apic_icr_write(int apicid, unsigned int val)
80{ 79{
81 union numachip_csr_g3_ext_irq_gen int_gen; 80 write_lcsr(CSR_G3_EXT_IRQ_GEN, (apicid << 16) | val);
82 81}
83 int_gen.s._destination_apic_id = phys_apicid;
84 int_gen.s._vector = 0;
85 int_gen.s._msgtype = APIC_DM_INIT >> 8;
86 int_gen.s._index = 0;
87
88 write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
89 82
90 int_gen.s._msgtype = APIC_DM_STARTUP >> 8; 83static void numachip2_apic_icr_write(int apicid, unsigned int val)
91 int_gen.s._vector = start_rip >> 12; 84{
85 numachip2_write32_lcsr(NUMACHIP2_APIC_ICR, (apicid << 12) | val);
86}
92 87
93 write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v); 88static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
89{
90 numachip_apic_icr_write(phys_apicid, APIC_DM_INIT);
91 numachip_apic_icr_write(phys_apicid, APIC_DM_STARTUP |
92 (start_rip >> 12));
94 93
95 return 0; 94 return 0;
96} 95}
97 96
98static void numachip_send_IPI_one(int cpu, int vector) 97static void numachip_send_IPI_one(int cpu, int vector)
99{ 98{
100 union numachip_csr_g3_ext_irq_gen int_gen; 99 int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu);
101 int apicid = per_cpu(x86_cpu_to_apicid, cpu); 100 unsigned int dmode;
102 101
103 int_gen.s._destination_apic_id = apicid; 102 preempt_disable();
104 int_gen.s._vector = vector; 103 local_apicid = __this_cpu_read(x86_cpu_to_apicid);
105 int_gen.s._msgtype = (vector == NMI_VECTOR ? APIC_DM_NMI : APIC_DM_FIXED) >> 8; 104
106 int_gen.s._index = 0; 105 /* Send via local APIC where non-local part matches */
106 if (!((apicid ^ local_apicid) >> NUMACHIP_LAPIC_BITS)) {
107 unsigned long flags;
108
109 local_irq_save(flags);
110 __default_send_IPI_dest_field(apicid, vector,
111 APIC_DEST_PHYSICAL);
112 local_irq_restore(flags);
113 preempt_enable();
114 return;
115 }
116 preempt_enable();
107 117
108 write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v); 118 dmode = (vector == NMI_VECTOR) ? APIC_DM_NMI : APIC_DM_FIXED;
119 numachip_apic_icr_write(apicid, dmode | vector);
109} 120}
110 121
111static void numachip_send_IPI_mask(const struct cpumask *mask, int vector) 122static void numachip_send_IPI_mask(const struct cpumask *mask, int vector)
@@ -149,9 +160,14 @@ static void numachip_send_IPI_self(int vector)
149 apic_write(APIC_SELF_IPI, vector); 160 apic_write(APIC_SELF_IPI, vector);
150} 161}
151 162
152static int __init numachip_probe(void) 163static int __init numachip1_probe(void)
153{ 164{
154 return apic == &apic_numachip; 165 return apic == &apic_numachip1;
166}
167
168static int __init numachip2_probe(void)
169{
170 return apic == &apic_numachip2;
155} 171}
156 172
157static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) 173static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
@@ -172,34 +188,118 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
172 188
173static int __init numachip_system_init(void) 189static int __init numachip_system_init(void)
174{ 190{
175 if (!numachip_system) 191 /* Map the LCSR area and set up the apic_icr_write function */
192 switch (numachip_system) {
193 case 1:
194 init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE);
195 numachip_apic_icr_write = numachip1_apic_icr_write;
196 x86_init.pci.arch_init = pci_numachip_init;
197 break;
198 case 2:
199 init_extra_mapping_uc(NUMACHIP2_LCSR_BASE, NUMACHIP2_LCSR_SIZE);
200 numachip_apic_icr_write = numachip2_apic_icr_write;
201
202 /* Use MCFG config cycles rather than locked CF8 cycles */
203 raw_pci_ops = &pci_mmcfg;
204 break;
205 default:
176 return 0; 206 return 0;
177 207 }
178 init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE);
179 init_extra_mapping_uc(NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_SIZE);
180 208
181 x86_cpuinit.fixup_cpu_id = fixup_cpu_id; 209 x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
182 x86_init.pci.arch_init = pci_numachip_init;
183 210
184 return 0; 211 return 0;
185} 212}
186early_initcall(numachip_system_init); 213early_initcall(numachip_system_init);
187 214
188static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 215static int numachip1_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
189{ 216{
190 if (!strncmp(oem_id, "NUMASC", 6)) { 217 if ((strncmp(oem_id, "NUMASC", 6) != 0) ||
191 numachip_system = 1; 218 (strncmp(oem_table_id, "NCONNECT", 8) != 0))
192 return 1; 219 return 0;
193 }
194 220
195 return 0; 221 numachip_system = 1;
222
223 return 1;
224}
225
226static int numachip2_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
227{
228 if ((strncmp(oem_id, "NUMASC", 6) != 0) ||
229 (strncmp(oem_table_id, "NCONECT2", 8) != 0))
230 return 0;
231
232 numachip_system = 2;
233
234 return 1;
235}
236
237/* APIC IPIs are queued */
238static void numachip_apic_wait_icr_idle(void)
239{
196} 240}
197 241
198static const struct apic apic_numachip __refconst = { 242/* APIC NMI IPIs are queued */
243static u32 numachip_safe_apic_wait_icr_idle(void)
244{
245 return 0;
246}
199 247
248static const struct apic apic_numachip1 __refconst = {
200 .name = "NumaConnect system", 249 .name = "NumaConnect system",
201 .probe = numachip_probe, 250 .probe = numachip1_probe,
202 .acpi_madt_oem_check = numachip_acpi_madt_oem_check, 251 .acpi_madt_oem_check = numachip1_acpi_madt_oem_check,
252 .apic_id_valid = numachip_apic_id_valid,
253 .apic_id_registered = numachip_apic_id_registered,
254
255 .irq_delivery_mode = dest_Fixed,
256 .irq_dest_mode = 0, /* physical */
257
258 .target_cpus = online_target_cpus,
259 .disable_esr = 0,
260 .dest_logical = 0,
261 .check_apicid_used = NULL,
262
263 .vector_allocation_domain = default_vector_allocation_domain,
264 .init_apic_ldr = flat_init_apic_ldr,
265
266 .ioapic_phys_id_map = NULL,
267 .setup_apic_routing = NULL,
268 .cpu_present_to_apicid = default_cpu_present_to_apicid,
269 .apicid_to_cpu_present = NULL,
270 .check_phys_apicid_present = default_check_phys_apicid_present,
271 .phys_pkg_id = numachip_phys_pkg_id,
272
273 .get_apic_id = numachip1_get_apic_id,
274 .set_apic_id = numachip1_set_apic_id,
275 .apic_id_mask = 0xffU << 24,
276
277 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
278
279 .send_IPI_mask = numachip_send_IPI_mask,
280 .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself,
281 .send_IPI_allbutself = numachip_send_IPI_allbutself,
282 .send_IPI_all = numachip_send_IPI_all,
283 .send_IPI_self = numachip_send_IPI_self,
284
285 .wakeup_secondary_cpu = numachip_wakeup_secondary,
286 .inquire_remote_apic = NULL, /* REMRD not supported */
287
288 .read = native_apic_mem_read,
289 .write = native_apic_mem_write,
290 .eoi_write = native_apic_mem_write,
291 .icr_read = native_apic_icr_read,
292 .icr_write = native_apic_icr_write,
293 .wait_icr_idle = numachip_apic_wait_icr_idle,
294 .safe_wait_icr_idle = numachip_safe_apic_wait_icr_idle,
295};
296
297apic_driver(apic_numachip1);
298
299static const struct apic apic_numachip2 __refconst = {
300 .name = "NumaConnect2 system",
301 .probe = numachip2_probe,
302 .acpi_madt_oem_check = numachip2_acpi_madt_oem_check,
203 .apic_id_valid = numachip_apic_id_valid, 303 .apic_id_valid = numachip_apic_id_valid,
204 .apic_id_registered = numachip_apic_id_registered, 304 .apic_id_registered = numachip_apic_id_registered,
205 305
@@ -221,8 +321,8 @@ static const struct apic apic_numachip __refconst = {
221 .check_phys_apicid_present = default_check_phys_apicid_present, 321 .check_phys_apicid_present = default_check_phys_apicid_present,
222 .phys_pkg_id = numachip_phys_pkg_id, 322 .phys_pkg_id = numachip_phys_pkg_id,
223 323
224 .get_apic_id = get_apic_id, 324 .get_apic_id = numachip2_get_apic_id,
225 .set_apic_id = set_apic_id, 325 .set_apic_id = numachip2_set_apic_id,
226 .apic_id_mask = 0xffU << 24, 326 .apic_id_mask = 0xffU << 24,
227 327
228 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, 328 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
@@ -241,8 +341,8 @@ static const struct apic apic_numachip __refconst = {
241 .eoi_write = native_apic_mem_write, 341 .eoi_write = native_apic_mem_write,
242 .icr_read = native_apic_icr_read, 342 .icr_read = native_apic_icr_read,
243 .icr_write = native_apic_icr_write, 343 .icr_write = native_apic_icr_write,
244 .wait_icr_idle = native_apic_wait_icr_idle, 344 .wait_icr_idle = numachip_apic_wait_icr_idle,
245 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, 345 .safe_wait_icr_idle = numachip_safe_apic_wait_icr_idle,
246}; 346};
247apic_driver(apic_numachip);
248 347
348apic_driver(apic_numachip2);
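
The rewrite above turns one NumaChip APIC driver into two registered variants that share almost all operations; the hooks that genuinely differ are selected once at init time, partly through the numachip_apic_icr_write function pointer. A stand-alone sketch of that pattern with hypothetical names:

/*
 * Two hardware variants share the common IPI path; only the register
 * write that differs is a distinct function, chosen once from the
 * detected "system" value and hidden behind a single function pointer.
 */
#include <stdio.h>

static void variant1_icr_write(int apicid, unsigned int val)
{
        printf("v1: write %#x for APIC %d via LCSR\n", val, apicid);
}

static void variant2_icr_write(int apicid, unsigned int val)
{
        printf("v2: write %#x for APIC %d via MMIO window\n", val, apicid);
}

/* Set once during init, used by all common code afterwards. */
static void (*icr_write)(int apicid, unsigned int val);

static int demo_system_init(int detected_system)
{
        switch (detected_system) {
        case 1:
                icr_write = variant1_icr_write;
                break;
        case 2:
                icr_write = variant2_icr_write;
                break;
        default:
                return 0;       /* not our hardware, do nothing */
        }
        return 1;
}

static void demo_send_ipi(int apicid, unsigned int vector)
{
        icr_write(apicid, vector);      /* variant differences hidden here */
}

int main(void)
{
        if (demo_system_init(2))
                demo_send_ipi(7, 0xfd);
        return 0;
}
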
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5c60bb162622..f25321894ad2 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -529,7 +529,7 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector)
529 } 529 }
530} 530}
531 531
532void eoi_ioapic_pin(int vector, struct mp_chip_data *data) 532static void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
533{ 533{
534 unsigned long flags; 534 unsigned long flags;
535 struct irq_pin_list *entry; 535 struct irq_pin_list *entry;
@@ -2547,7 +2547,9 @@ void __init setup_ioapic_dest(void)
2547 mask = apic->target_cpus(); 2547 mask = apic->target_cpus();
2548 2548
2549 chip = irq_data_get_irq_chip(idata); 2549 chip = irq_data_get_irq_chip(idata);
2550 chip->irq_set_affinity(idata, mask, false); 2550 /* Might be lapic_chip for irq 0 */
2551 if (chip->irq_set_affinity)
2552 chip->irq_set_affinity(idata, mask, false);
2551 } 2553 }
2552} 2554}
2553#endif 2555#endif
@@ -2907,6 +2909,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2907 struct irq_data *irq_data; 2909 struct irq_data *irq_data;
2908 struct mp_chip_data *data; 2910 struct mp_chip_data *data;
2909 struct irq_alloc_info *info = arg; 2911 struct irq_alloc_info *info = arg;
2912 unsigned long flags;
2910 2913
2911 if (!info || nr_irqs > 1) 2914 if (!info || nr_irqs > 1)
2912 return -EINVAL; 2915 return -EINVAL;
@@ -2939,11 +2942,14 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2939 2942
2940 cfg = irqd_cfg(irq_data); 2943 cfg = irqd_cfg(irq_data);
2941 add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); 2944 add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
2945
2946 local_irq_save(flags);
2942 if (info->ioapic_entry) 2947 if (info->ioapic_entry)
2943 mp_setup_entry(cfg, data, info->ioapic_entry); 2948 mp_setup_entry(cfg, data, info->ioapic_entry);
2944 mp_register_handler(virq, data->trigger); 2949 mp_register_handler(virq, data->trigger);
2945 if (virq < nr_legacy_irqs()) 2950 if (virq < nr_legacy_irqs())
2946 legacy_pic->mask(virq); 2951 legacy_pic->mask(virq);
2952 local_irq_restore(flags);
2947 2953
2948 apic_printk(APIC_VERBOSE, KERN_DEBUG 2954 apic_printk(APIC_VERBOSE, KERN_DEBUG
2949 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n", 2955 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
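
The io_apic.c fix above checks chip->irq_set_affinity before calling it, because some irq chips (the added comment mentions lapic_chip for irq 0) leave optional hooks NULL. A minimal sketch of calling an optional operation defensively (types and names invented for the example):

/*
 * Generic code must not assume every chip fills in every hook; optional
 * operations are called only when the pointer is non-NULL.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_chip {
        const char *name;
        int (*set_affinity)(int irq, unsigned long mask);
};

static int apply_affinity(const struct demo_chip *chip, int irq,
                          unsigned long mask)
{
        /* Optional hook: only call it if the chip provides one. */
        if (chip->set_affinity)
                return chip->set_affinity(irq, mask);
        return 0;
}

static int real_set_affinity(int irq, unsigned long mask)
{
        printf("irq %d -> mask %#lx\n", irq, mask);
        return 0;
}

int main(void)
{
        struct demo_chip ioapic = { "ioapic", real_set_affinity };
        struct demo_chip lapic  = { "lapic",  NULL };   /* no hook */

        apply_affinity(&ioapic, 5, 0x3);
        apply_affinity(&lapic, 0, 0x1);
        return 0;
}
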
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 8e3d22a1af94..95a18e25d5bf 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -54,9 +54,6 @@ void common(void) {
54 OFFSET(IA32_SIGCONTEXT_ip, sigcontext_ia32, ip); 54 OFFSET(IA32_SIGCONTEXT_ip, sigcontext_ia32, ip);
55 55
56 BLANK(); 56 BLANK();
57 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
58
59 BLANK();
60 OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); 57 OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
61#endif 58#endif
62 59
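
asm-offsets.c exists only to export C structure offsets as assembler constants, so dropping the sysenter_return field just deletes its OFFSET() line. The underlying mechanism is offsetof(); a stand-alone sketch with a made-up struct:

/*
 * Simplified view of what an asm-offsets generator emits: numeric
 * offsets of struct members, for consumption by assembly code.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_thread_info {
        unsigned int flags;
        unsigned int status;
        unsigned int cpu;
};

int main(void)
{
        printf("#define TI_flags %zu\n",
               offsetof(struct demo_thread_info, flags));
        printf("#define TI_cpu   %zu\n",
               offsetof(struct demo_thread_info, cpu));
        return 0;
}
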
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 4eb065c6bed2..58031303e304 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
41obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o 41obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
42obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o perf_event_intel_cqm.o 42obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o perf_event_intel_cqm.o
43obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_pt.o perf_event_intel_bts.o 43obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_pt.o perf_event_intel_bts.o
44obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_cstate.o
44 45
45obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \ 46obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
46 perf_event_intel_uncore_snb.o \ 47 perf_event_intel_uncore_snb.o \
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index be4febc58b94..e38d338a6447 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -157,7 +157,7 @@ struct _cpuid4_info_regs {
157 struct amd_northbridge *nb; 157 struct amd_northbridge *nb;
158}; 158};
159 159
160unsigned short num_cache_leaves; 160static unsigned short num_cache_leaves;
161 161
162/* AMD doesn't have CPUID4. Emulate it here to report the same 162/* AMD doesn't have CPUID4. Emulate it here to report the same
163 information to the user. This makes some assumptions about the machine: 163 information to the user. This makes some assumptions about the machine:
@@ -326,7 +326,7 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
326 * 326 *
327 * @returns: the disabled index if used or negative value if slot free. 327 * @returns: the disabled index if used or negative value if slot free.
328 */ 328 */
329int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot) 329static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
330{ 330{
331 unsigned int reg = 0; 331 unsigned int reg = 0;
332 332
@@ -403,8 +403,8 @@ static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
403 * 403 *
404 * @return: 0 on success, error status on failure 404 * @return: 0 on success, error status on failure
405 */ 405 */
406int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot, 406static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
407 unsigned long index) 407 unsigned slot, unsigned long index)
408{ 408{
409 int ret = 0; 409 int ret = 0;
410 410
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 9d014b82a124..c5b0d562dbf5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1586,6 +1586,8 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1586 winchip_mcheck_init(c); 1586 winchip_mcheck_init(c);
1587 return 1; 1587 return 1;
1588 break; 1588 break;
1589 default:
1590 return 0;
1589 } 1591 }
1590 1592
1591 return 0; 1593 return 0;
@@ -1605,6 +1607,8 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1605 mce_amd_feature_init(c); 1607 mce_amd_feature_init(c);
1606 mce_flags.overflow_recov = !!(ebx & BIT(0)); 1608 mce_flags.overflow_recov = !!(ebx & BIT(0));
1607 mce_flags.succor = !!(ebx & BIT(1)); 1609 mce_flags.succor = !!(ebx & BIT(1));
1610 mce_flags.smca = !!(ebx & BIT(3));
1611
1608 break; 1612 break;
1609 } 1613 }
1610 1614
@@ -2042,7 +2046,7 @@ int __init mcheck_init(void)
2042 * Disable machine checks on suspend and shutdown. We can't really handle 2046 * Disable machine checks on suspend and shutdown. We can't really handle
2043 * them later. 2047 * them later.
2044 */ 2048 */
2045static int mce_disable_error_reporting(void) 2049static void mce_disable_error_reporting(void)
2046{ 2050{
2047 int i; 2051 int i;
2048 2052
@@ -2052,17 +2056,32 @@ static int mce_disable_error_reporting(void)
2052 if (b->init) 2056 if (b->init)
2053 wrmsrl(MSR_IA32_MCx_CTL(i), 0); 2057 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2054 } 2058 }
2055 return 0; 2059 return;
2060}
2061
2062static void vendor_disable_error_reporting(void)
2063{
2064 /*
2065 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
2066 * Disabling them for just a single offlined CPU is bad, since it will
2067 * inhibit reporting for all shared resources on the socket like the
2068 * last level cache (LLC), the integrated memory controller (iMC), etc.
2069 */
2070 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2071 return;
2072
2073 mce_disable_error_reporting();
2056} 2074}
2057 2075
2058static int mce_syscore_suspend(void) 2076static int mce_syscore_suspend(void)
2059{ 2077{
2060 return mce_disable_error_reporting(); 2078 vendor_disable_error_reporting();
2079 return 0;
2061} 2080}
2062 2081
2063static void mce_syscore_shutdown(void) 2082static void mce_syscore_shutdown(void)
2064{ 2083{
2065 mce_disable_error_reporting(); 2084 vendor_disable_error_reporting();
2066} 2085}
2067 2086
2068/* 2087/*
@@ -2342,19 +2361,14 @@ static void mce_device_remove(unsigned int cpu)
2342static void mce_disable_cpu(void *h) 2361static void mce_disable_cpu(void *h)
2343{ 2362{
2344 unsigned long action = *(unsigned long *)h; 2363 unsigned long action = *(unsigned long *)h;
2345 int i;
2346 2364
2347 if (!mce_available(raw_cpu_ptr(&cpu_info))) 2365 if (!mce_available(raw_cpu_ptr(&cpu_info)))
2348 return; 2366 return;
2349 2367
2350 if (!(action & CPU_TASKS_FROZEN)) 2368 if (!(action & CPU_TASKS_FROZEN))
2351 cmci_clear(); 2369 cmci_clear();
2352 for (i = 0; i < mca_cfg.banks; i++) {
2353 struct mce_bank *b = &mce_banks[i];
2354 2370
2355 if (b->init) 2371 vendor_disable_error_reporting();
2356 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2357 }
2358} 2372}
2359 2373
2360static void mce_reenable_cpu(void *h) 2374static void mce_reenable_cpu(void *h)
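
The mce.c change above splits the work into a generic helper that clears the per-bank MSRs and a vendor_disable_error_reporting() wrapper that skips the clears on Intel, where some of those MSRs are socket-wide. A user-space sketch of the same policy/mechanism split (the vendor enum and all names are invented):

/*
 * The low-level "clear everything" helper stays generic; a thin wrapper
 * makes the vendor-specific policy decision.
 */
#include <stdio.h>

enum demo_vendor { DEMO_VENDOR_INTEL, DEMO_VENDOR_AMD };

static enum demo_vendor boot_vendor = DEMO_VENDOR_INTEL;

static void demo_disable_error_reporting(void)
{
        printf("clearing per-bank control registers\n");
}

static void demo_vendor_disable_error_reporting(void)
{
        /* Socket-wide registers: clearing them for one CPU hurts others. */
        if (boot_vendor == DEMO_VENDOR_INTEL)
                return;

        demo_disable_error_reporting();
}

int main(void)
{
        demo_vendor_disable_error_reporting();  /* no-op on "Intel" */
        boot_vendor = DEMO_VENDOR_AMD;
        demo_vendor_disable_error_reporting();  /* performs the clears */
        return 0;
}
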
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 1af51b1586d7..2c5aaf8c2e2f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -503,14 +503,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
503 return; 503 return;
504 } 504 }
505 505
506 /* Check whether a vector already exists */
507 if (h & APIC_VECTOR_MASK) {
508 printk(KERN_DEBUG
509 "CPU%d: Thermal LVT vector (%#x) already installed\n",
510 cpu, (h & APIC_VECTOR_MASK));
511 return;
512 }
513
514 /* early Pentium M models use different method for enabling TM2 */ 506 /* early Pentium M models use different method for enabling TM2 */
515 if (cpu_has(c, X86_FEATURE_TM2)) { 507 if (cpu_has(c, X86_FEATURE_TM2)) {
516 if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) { 508 if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile
index 285c85427c32..220b1a508513 100644
--- a/arch/x86/kernel/cpu/microcode/Makefile
+++ b/arch/x86/kernel/cpu/microcode/Makefile
@@ -2,6 +2,3 @@ microcode-y := core.o
2obj-$(CONFIG_MICROCODE) += microcode.o 2obj-$(CONFIG_MICROCODE) += microcode.o
3microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o 3microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o
4microcode-$(CONFIG_MICROCODE_AMD) += amd.o 4microcode-$(CONFIG_MICROCODE_AMD) += amd.o
5obj-$(CONFIG_MICROCODE_EARLY) += core_early.o
6obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o
7obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 12829c3ced3c..2233f8a76615 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -1,5 +1,9 @@
1/* 1/*
2 * AMD CPU Microcode Update Driver for Linux 2 * AMD CPU Microcode Update Driver for Linux
3 *
4 * This driver allows to upgrade microcode on F10h AMD
5 * CPUs and later.
6 *
3 * Copyright (C) 2008-2011 Advanced Micro Devices Inc. 7 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
4 * 8 *
5 * Author: Peter Oruba <peter.oruba@amd.com> 9 * Author: Peter Oruba <peter.oruba@amd.com>
@@ -7,34 +11,31 @@
7 * Based on work by: 11 * Based on work by:
8 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> 12 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
9 * 13 *
10 * Maintainers: 14 * early loader:
11 * Andreas Herrmann <herrmann.der.user@googlemail.com> 15 * Copyright (C) 2013 Advanced Micro Devices, Inc.
12 * Borislav Petkov <bp@alien8.de>
13 * 16 *
14 * This driver allows to upgrade microcode on F10h AMD 17 * Author: Jacob Shin <jacob.shin@amd.com>
15 * CPUs and later. 18 * Fixes: Borislav Petkov <bp@suse.de>
16 * 19 *
17 * Licensed under the terms of the GNU General Public 20 * Licensed under the terms of the GNU General Public
18 * License version 2. See file COPYING for details. 21 * License version 2. See file COPYING for details.
19 */ 22 */
23#define pr_fmt(fmt) "microcode: " fmt
20 24
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25#include <linux/earlycpio.h>
22
23#include <linux/firmware.h> 26#include <linux/firmware.h>
24#include <linux/uaccess.h> 27#include <linux/uaccess.h>
25#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/initrd.h>
26#include <linux/kernel.h> 30#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h> 31#include <linux/pci.h>
29 32
33#include <asm/microcode_amd.h>
30#include <asm/microcode.h> 34#include <asm/microcode.h>
31#include <asm/processor.h> 35#include <asm/processor.h>
36#include <asm/setup.h>
37#include <asm/cpu.h>
32#include <asm/msr.h> 38#include <asm/msr.h>
33#include <asm/microcode_amd.h>
34
35MODULE_DESCRIPTION("AMD Microcode Update Driver");
36MODULE_AUTHOR("Peter Oruba");
37MODULE_LICENSE("GPL v2");
38 39
39static struct equiv_cpu_entry *equiv_cpu_table; 40static struct equiv_cpu_entry *equiv_cpu_table;
40 41
@@ -47,6 +48,432 @@ struct ucode_patch {
47 48
48static LIST_HEAD(pcache); 49static LIST_HEAD(pcache);
49 50
51/*
52 * This points to the current valid container of microcode patches which we will
53 * save from the initrd before jettisoning its contents.
54 */
55static u8 *container;
56static size_t container_size;
57
58static u32 ucode_new_rev;
59u8 amd_ucode_patch[PATCH_MAX_SIZE];
60static u16 this_equiv_id;
61
62static struct cpio_data ucode_cpio;
63
64/*
65 * Microcode patch container file is prepended to the initrd in cpio format.
66 * See Documentation/x86/early-microcode.txt
67 */
68static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
69
70static struct cpio_data __init find_ucode_in_initrd(void)
71{
72 long offset = 0;
73 char *path;
74 void *start;
75 size_t size;
76
77#ifdef CONFIG_X86_32
78 struct boot_params *p;
79
80 /*
81 * On 32-bit, early load occurs before paging is turned on so we need
82 * to use physical addresses.
83 */
84 p = (struct boot_params *)__pa_nodebug(&boot_params);
85 path = (char *)__pa_nodebug(ucode_path);
86 start = (void *)p->hdr.ramdisk_image;
87 size = p->hdr.ramdisk_size;
88#else
89 path = ucode_path;
90 start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
91 size = boot_params.hdr.ramdisk_size;
92#endif
93
94 return find_cpio_data(path, start, size, &offset);
95}
96
97static size_t compute_container_size(u8 *data, u32 total_size)
98{
99 size_t size = 0;
100 u32 *header = (u32 *)data;
101
102 if (header[0] != UCODE_MAGIC ||
103 header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
104 header[2] == 0) /* size */
105 return size;
106
107 size = header[2] + CONTAINER_HDR_SZ;
108 total_size -= size;
109 data += size;
110
111 while (total_size) {
112 u16 patch_size;
113
114 header = (u32 *)data;
115
116 if (header[0] != UCODE_UCODE_TYPE)
117 break;
118
119 /*
120 * Sanity-check patch size.
121 */
122 patch_size = header[1];
123 if (patch_size > PATCH_MAX_SIZE)
124 break;
125
126 size += patch_size + SECTION_HDR_SIZE;
127 data += patch_size + SECTION_HDR_SIZE;
128 total_size -= patch_size + SECTION_HDR_SIZE;
129 }
130
131 return size;
132}
133
134/*
135 * Early load occurs before we can vmalloc(). So we look for the microcode
136 * patch container file in initrd, traverse equivalent cpu table, look for a
137 * matching microcode patch, and update, all in initrd memory in place.
138 * When vmalloc() is available for use later -- on 64-bit during first AP load,
139 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
140 * load_microcode_amd() to save equivalent cpu table and microcode patches in
141 * kernel heap memory.
142 */
143static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
144{
145 struct equiv_cpu_entry *eq;
146 size_t *cont_sz;
147 u32 *header;
148 u8 *data, **cont;
149 u8 (*patch)[PATCH_MAX_SIZE];
150 u16 eq_id = 0;
151 int offset, left;
152 u32 rev, eax, ebx, ecx, edx;
153 u32 *new_rev;
154
155#ifdef CONFIG_X86_32
156 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
157 cont_sz = (size_t *)__pa_nodebug(&container_size);
158 cont = (u8 **)__pa_nodebug(&container);
159 patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
160#else
161 new_rev = &ucode_new_rev;
162 cont_sz = &container_size;
163 cont = &container;
164 patch = &amd_ucode_patch;
165#endif
166
167 data = ucode;
168 left = size;
169 header = (u32 *)data;
170
171 /* find equiv cpu table */
172 if (header[0] != UCODE_MAGIC ||
173 header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
174 header[2] == 0) /* size */
175 return;
176
177 eax = 0x00000001;
178 ecx = 0;
179 native_cpuid(&eax, &ebx, &ecx, &edx);
180
181 while (left > 0) {
182 eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
183
184 *cont = data;
185
186 /* Advance past the container header */
187 offset = header[2] + CONTAINER_HDR_SZ;
188 data += offset;
189 left -= offset;
190
191 eq_id = find_equiv_id(eq, eax);
192 if (eq_id) {
193 this_equiv_id = eq_id;
194 *cont_sz = compute_container_size(*cont, left + offset);
195
196 /*
197 * truncate how much we need to iterate over in the
198 * ucode update loop below
199 */
200 left = *cont_sz - offset;
201 break;
202 }
203
204 /*
205 * support multiple container files appended together. if this
206 * one does not have a matching equivalent cpu entry, we fast
207 * forward to the next container file.
208 */
209 while (left > 0) {
210 header = (u32 *)data;
211 if (header[0] == UCODE_MAGIC &&
212 header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
213 break;
214
215 offset = header[1] + SECTION_HDR_SIZE;
216 data += offset;
217 left -= offset;
218 }
219
220 /* mark where the next microcode container file starts */
221 offset = data - (u8 *)ucode;
222 ucode = data;
223 }
224
225 if (!eq_id) {
226 *cont = NULL;
227 *cont_sz = 0;
228 return;
229 }
230
231 if (check_current_patch_level(&rev, true))
232 return;
233
234 while (left > 0) {
235 struct microcode_amd *mc;
236
237 header = (u32 *)data;
238 if (header[0] != UCODE_UCODE_TYPE || /* type */
239 header[1] == 0) /* size */
240 break;
241
242 mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
243
244 if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
245
246 if (!__apply_microcode_amd(mc)) {
247 rev = mc->hdr.patch_id;
248 *new_rev = rev;
249
250 if (save_patch)
251 memcpy(patch, mc,
252 min_t(u32, header[1], PATCH_MAX_SIZE));
253 }
254 }
255
256 offset = header[1] + SECTION_HDR_SIZE;
257 data += offset;
258 left -= offset;
259 }
260}
261
262static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
263 unsigned int family)
264{
265#ifdef CONFIG_X86_64
266 char fw_name[36] = "amd-ucode/microcode_amd.bin";
267
268 if (family >= 0x15)
269 snprintf(fw_name, sizeof(fw_name),
270 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
271
272 return get_builtin_firmware(cp, fw_name);
273#else
274 return false;
275#endif
276}
277
278void __init load_ucode_amd_bsp(unsigned int family)
279{
280 struct cpio_data cp;
281 void **data;
282 size_t *size;
283
284#ifdef CONFIG_X86_32
285 data = (void **)__pa_nodebug(&ucode_cpio.data);
286 size = (size_t *)__pa_nodebug(&ucode_cpio.size);
287#else
288 data = &ucode_cpio.data;
289 size = &ucode_cpio.size;
290#endif
291
292 cp = find_ucode_in_initrd();
293 if (!cp.data) {
294 if (!load_builtin_amd_microcode(&cp, family))
295 return;
296 }
297
298 *data = cp.data;
299 *size = cp.size;
300
301 apply_ucode_in_initrd(cp.data, cp.size, true);
302}
303
304#ifdef CONFIG_X86_32
305/*
306 * On 32-bit, since AP's early load occurs before paging is turned on, we
307 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
308 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
309 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
310 * which is used upon resume from suspend.
311 */
312void load_ucode_amd_ap(void)
313{
314 struct microcode_amd *mc;
315 size_t *usize;
316 void **ucode;
317
318 mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
319 if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
320 __apply_microcode_amd(mc);
321 return;
322 }
323
324 ucode = (void *)__pa_nodebug(&container);
325 usize = (size_t *)__pa_nodebug(&container_size);
326
327 if (!*ucode || !*usize)
328 return;
329
330 apply_ucode_in_initrd(*ucode, *usize, false);
331}
332
333static void __init collect_cpu_sig_on_bsp(void *arg)
334{
335 unsigned int cpu = smp_processor_id();
336 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
337
338 uci->cpu_sig.sig = cpuid_eax(0x00000001);
339}
340
341static void __init get_bsp_sig(void)
342{
343 unsigned int bsp = boot_cpu_data.cpu_index;
344 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
345
346 if (!uci->cpu_sig.sig)
347 smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
348}
349#else
350void load_ucode_amd_ap(void)
351{
352 unsigned int cpu = smp_processor_id();
353 struct equiv_cpu_entry *eq;
354 struct microcode_amd *mc;
355 u32 rev, eax;
356 u16 eq_id;
357
358 /* Exit if called on the BSP. */
359 if (!cpu)
360 return;
361
362 if (!container)
363 return;
364
365 /*
366 * 64-bit runs with paging enabled, thus early==false.
367 */
368 if (check_current_patch_level(&rev, false))
369 return;
370
371 eax = cpuid_eax(0x00000001);
372 eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
373
374 eq_id = find_equiv_id(eq, eax);
375 if (!eq_id)
376 return;
377
378 if (eq_id == this_equiv_id) {
379 mc = (struct microcode_amd *)amd_ucode_patch;
380
381 if (mc && rev < mc->hdr.patch_id) {
382 if (!__apply_microcode_amd(mc))
383 ucode_new_rev = mc->hdr.patch_id;
384 }
385
386 } else {
387 if (!ucode_cpio.data)
388 return;
389
390 /*
391 * AP has a different equivalence ID than BSP, looks like
392 * mixed-steppings silicon so go through the ucode blob anew.
393 */
394 apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
395 }
396}
397#endif
398
399int __init save_microcode_in_initrd_amd(void)
400{
401 unsigned long cont;
402 int retval = 0;
403 enum ucode_state ret;
404 u8 *cont_va;
405 u32 eax;
406
407 if (!container)
408 return -EINVAL;
409
410#ifdef CONFIG_X86_32
411 get_bsp_sig();
412 cont = (unsigned long)container;
413 cont_va = __va(container);
414#else
415 /*
416 * We need the physical address of the container for both bitness since
417 * boot_params.hdr.ramdisk_image is a physical address.
418 */
419 cont = __pa(container);
420 cont_va = container;
421#endif
422
423 /*
424 * Take into account the fact that the ramdisk might get relocated and
425 * therefore we need to recompute the container's position in virtual
426 * memory space.
427 */
428 if (relocated_ramdisk)
429 container = (u8 *)(__va(relocated_ramdisk) +
430 (cont - boot_params.hdr.ramdisk_image));
431 else
432 container = cont_va;
433
434 if (ucode_new_rev)
435 pr_info("microcode: updated early to new patch_level=0x%08x\n",
436 ucode_new_rev);
437
438 eax = cpuid_eax(0x00000001);
439 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
440
441 ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
442 if (ret != UCODE_OK)
443 retval = -EINVAL;
444
445 /*
446 * This will be freed any msec now, stash patches for the current
447 * family and switch to patch cache for cpu hotplug, etc later.
448 */
449 container = NULL;
450 container_size = 0;
451
452 return retval;
453}
454
455void reload_ucode_amd(void)
456{
457 struct microcode_amd *mc;
458 u32 rev;
459
460 /*
461 * early==false because this is a syscore ->resume path and by
462 * that time paging is long enabled.
463 */
464 if (check_current_patch_level(&rev, false))
465 return;
466
467 mc = (struct microcode_amd *)amd_ucode_patch;
468
469 if (mc && rev < mc->hdr.patch_id) {
470 if (!__apply_microcode_amd(mc)) {
471 ucode_new_rev = mc->hdr.patch_id;
472 pr_info("microcode: reload patch_level=0x%08x\n",
473 ucode_new_rev);
474 }
475 }
476}
50static u16 __find_equiv_id(unsigned int cpu) 477static u16 __find_equiv_id(unsigned int cpu)
51{ 478{
52 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 479 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -177,6 +604,53 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
177 return patch_size; 604 return patch_size;
178} 605}
179 606
607/*
608 * Those patch levels cannot be updated to newer ones and thus should be final.
609 */
610static u32 final_levels[] = {
611 0x01000098,
612 0x0100009f,
613 0x010000af,
614 0, /* T-101 terminator */
615};
616
617/*
618 * Check the current patch level on this CPU.
619 *
620 * @rev: Use it to return the patch level. It is set to 0 in the case of
621 * error.
622 *
623 * Returns:
624 * - true: if update should stop
625 * - false: otherwise
626 */
627bool check_current_patch_level(u32 *rev, bool early)
628{
629 u32 lvl, dummy, i;
630 bool ret = false;
631 u32 *levels;
632
633 native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
634
635 if (IS_ENABLED(CONFIG_X86_32) && early)
636 levels = (u32 *)__pa_nodebug(&final_levels);
637 else
638 levels = final_levels;
639
640 for (i = 0; levels[i]; i++) {
641 if (lvl == levels[i]) {
642 lvl = 0;
643 ret = true;
644 break;
645 }
646 }
647
648 if (rev)
649 *rev = lvl;
650
651 return ret;
652}
653
180int __apply_microcode_amd(struct microcode_amd *mc_amd) 654int __apply_microcode_amd(struct microcode_amd *mc_amd)
181{ 655{
182 u32 rev, dummy; 656 u32 rev, dummy;
@@ -197,7 +671,7 @@ int apply_microcode_amd(int cpu)
197 struct microcode_amd *mc_amd; 671 struct microcode_amd *mc_amd;
198 struct ucode_cpu_info *uci; 672 struct ucode_cpu_info *uci;
199 struct ucode_patch *p; 673 struct ucode_patch *p;
200 u32 rev, dummy; 674 u32 rev;
201 675
202 BUG_ON(raw_smp_processor_id() != cpu); 676 BUG_ON(raw_smp_processor_id() != cpu);
203 677
@@ -210,7 +684,8 @@ int apply_microcode_amd(int cpu)
210 mc_amd = p->data; 684 mc_amd = p->data;
211 uci->mc = p->data; 685 uci->mc = p->data;
212 686
213 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); 687 if (check_current_patch_level(&rev, false))
688 return -1;
214 689
215 /* need to apply patch? */ 690 /* need to apply patch? */
216 if (rev >= mc_amd->hdr.patch_id) { 691 if (rev >= mc_amd->hdr.patch_id) {
@@ -387,7 +862,7 @@ enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t s
387 if (ret != UCODE_OK) 862 if (ret != UCODE_OK)
388 cleanup(); 863 cleanup();
389 864
390#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) 865#ifdef CONFIG_X86_32
391 /* save BSP's matching patch for early load */ 866 /* save BSP's matching patch for early load */
392 if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 867 if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
393 struct ucode_patch *p = find_patch(cpu); 868 struct ucode_patch *p = find_patch(cpu);
@@ -475,7 +950,7 @@ static struct microcode_ops microcode_amd_ops = {
475 950
476struct microcode_ops * __init init_amd_microcode(void) 951struct microcode_ops * __init init_amd_microcode(void)
477{ 952{
478 struct cpuinfo_x86 *c = &cpu_data(0); 953 struct cpuinfo_x86 *c = &boot_cpu_data;
479 954
480 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { 955 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
481 pr_warning("AMD CPU family 0x%x not supported\n", c->x86); 956 pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
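
Among the code moved into amd.c above, check_current_patch_level() compares the current patch level against a zero-terminated table of levels that must be treated as final. A user-space sketch of that lookup, reusing the level values shown in the patch (the MSR read is replaced by a plain parameter):

/*
 * The caller both learns the level (zeroed if it is "final") and
 * whether updating must stop, mirroring check_current_patch_level().
 */
#include <stdio.h>
#include <stdbool.h>

static const unsigned int final_levels[] = {
        0x01000098,
        0x0100009f,
        0x010000af,
        0,                      /* terminator */
};

static bool patch_level_is_final(unsigned int lvl, unsigned int *rev)
{
        bool final = false;
        int i;

        for (i = 0; final_levels[i]; i++) {
                if (lvl == final_levels[i]) {
                        lvl = 0;        /* report "no usable level" */
                        final = true;
                        break;
                }
        }

        if (rev)
                *rev = lvl;
        return final;
}

int main(void)
{
        unsigned int rev;
        bool final;

        final = patch_level_is_final(0x01000085, &rev);
        printf("final=%d rev=%#x\n", final, rev);

        final = patch_level_is_final(0x0100009f, &rev);
        printf("final=%d rev=%#x\n", final, rev);
        return 0;
}
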
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
deleted file mode 100644
index e8a215a9a345..000000000000
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ /dev/null
@@ -1,440 +0,0 @@
1/*
2 * Copyright (C) 2013 Advanced Micro Devices, Inc.
3 *
4 * Author: Jacob Shin <jacob.shin@amd.com>
5 * Fixes: Borislav Petkov <bp@suse.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/earlycpio.h>
13#include <linux/initrd.h>
14
15#include <asm/cpu.h>
16#include <asm/setup.h>
17#include <asm/microcode_amd.h>
18
19/*
20 * This points to the current valid container of microcode patches which we will
21 * save from the initrd before jettisoning its contents.
22 */
23static u8 *container;
24static size_t container_size;
25
26static u32 ucode_new_rev;
27u8 amd_ucode_patch[PATCH_MAX_SIZE];
28static u16 this_equiv_id;
29
30static struct cpio_data ucode_cpio;
31
32/*
33 * Microcode patch container file is prepended to the initrd in cpio format.
34 * See Documentation/x86/early-microcode.txt
35 */
36static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
37
38static struct cpio_data __init find_ucode_in_initrd(void)
39{
40 long offset = 0;
41 char *path;
42 void *start;
43 size_t size;
44
45#ifdef CONFIG_X86_32
46 struct boot_params *p;
47
48 /*
49 * On 32-bit, early load occurs before paging is turned on so we need
50 * to use physical addresses.
51 */
52 p = (struct boot_params *)__pa_nodebug(&boot_params);
53 path = (char *)__pa_nodebug(ucode_path);
54 start = (void *)p->hdr.ramdisk_image;
55 size = p->hdr.ramdisk_size;
56#else
57 path = ucode_path;
58 start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
59 size = boot_params.hdr.ramdisk_size;
60#endif
61
62 return find_cpio_data(path, start, size, &offset);
63}
64
65static size_t compute_container_size(u8 *data, u32 total_size)
66{
67 size_t size = 0;
68 u32 *header = (u32 *)data;
69
70 if (header[0] != UCODE_MAGIC ||
71 header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
72 header[2] == 0) /* size */
73 return size;
74
75 size = header[2] + CONTAINER_HDR_SZ;
76 total_size -= size;
77 data += size;
78
79 while (total_size) {
80 u16 patch_size;
81
82 header = (u32 *)data;
83
84 if (header[0] != UCODE_UCODE_TYPE)
85 break;
86
87 /*
88 * Sanity-check patch size.
89 */
90 patch_size = header[1];
91 if (patch_size > PATCH_MAX_SIZE)
92 break;
93
94 size += patch_size + SECTION_HDR_SIZE;
95 data += patch_size + SECTION_HDR_SIZE;
96 total_size -= patch_size + SECTION_HDR_SIZE;
97 }
98
99 return size;
100}
101
102/*
103 * Early load occurs before we can vmalloc(). So we look for the microcode
104 * patch container file in initrd, traverse equivalent cpu table, look for a
105 * matching microcode patch, and update, all in initrd memory in place.
106 * When vmalloc() is available for use later -- on 64-bit during first AP load,
107 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
108 * load_microcode_amd() to save equivalent cpu table and microcode patches in
109 * kernel heap memory.
110 */
111static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
112{
113 struct equiv_cpu_entry *eq;
114 size_t *cont_sz;
115 u32 *header;
116 u8 *data, **cont;
117 u8 (*patch)[PATCH_MAX_SIZE];
118 u16 eq_id = 0;
119 int offset, left;
120 u32 rev, eax, ebx, ecx, edx;
121 u32 *new_rev;
122
123#ifdef CONFIG_X86_32
124 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
125 cont_sz = (size_t *)__pa_nodebug(&container_size);
126 cont = (u8 **)__pa_nodebug(&container);
127 patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
128#else
129 new_rev = &ucode_new_rev;
130 cont_sz = &container_size;
131 cont = &container;
132 patch = &amd_ucode_patch;
133#endif
134
135 data = ucode;
136 left = size;
137 header = (u32 *)data;
138
139 /* find equiv cpu table */
140 if (header[0] != UCODE_MAGIC ||
141 header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
142 header[2] == 0) /* size */
143 return;
144
145 eax = 0x00000001;
146 ecx = 0;
147 native_cpuid(&eax, &ebx, &ecx, &edx);
148
149 while (left > 0) {
150 eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
151
152 *cont = data;
153
154 /* Advance past the container header */
155 offset = header[2] + CONTAINER_HDR_SZ;
156 data += offset;
157 left -= offset;
158
159 eq_id = find_equiv_id(eq, eax);
160 if (eq_id) {
161 this_equiv_id = eq_id;
162 *cont_sz = compute_container_size(*cont, left + offset);
163
164 /*
165 * truncate how much we need to iterate over in the
166 * ucode update loop below
167 */
168 left = *cont_sz - offset;
169 break;
170 }
171
172 /*
173 * support multiple container files appended together. if this
174 * one does not have a matching equivalent cpu entry, we fast
175 * forward to the next container file.
176 */
177 while (left > 0) {
178 header = (u32 *)data;
179 if (header[0] == UCODE_MAGIC &&
180 header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
181 break;
182
183 offset = header[1] + SECTION_HDR_SIZE;
184 data += offset;
185 left -= offset;
186 }
187
188 /* mark where the next microcode container file starts */
189 offset = data - (u8 *)ucode;
190 ucode = data;
191 }
192
193 if (!eq_id) {
194 *cont = NULL;
195 *cont_sz = 0;
196 return;
197 }
198
199 /* find ucode and update if needed */
200
201 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
202
203 while (left > 0) {
204 struct microcode_amd *mc;
205
206 header = (u32 *)data;
207 if (header[0] != UCODE_UCODE_TYPE || /* type */
208 header[1] == 0) /* size */
209 break;
210
211 mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
212
213 if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
214
215 if (!__apply_microcode_amd(mc)) {
216 rev = mc->hdr.patch_id;
217 *new_rev = rev;
218
219 if (save_patch)
220 memcpy(patch, mc,
221 min_t(u32, header[1], PATCH_MAX_SIZE));
222 }
223 }
224
225 offset = header[1] + SECTION_HDR_SIZE;
226 data += offset;
227 left -= offset;
228 }
229}
230
231static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
232 unsigned int family)
233{
234#ifdef CONFIG_X86_64
235 char fw_name[36] = "amd-ucode/microcode_amd.bin";
236
237 if (family >= 0x15)
238 snprintf(fw_name, sizeof(fw_name),
239 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
240
241 return get_builtin_firmware(cp, fw_name);
242#else
243 return false;
244#endif
245}
246
247void __init load_ucode_amd_bsp(unsigned int family)
248{
249 struct cpio_data cp;
250 void **data;
251 size_t *size;
252
253#ifdef CONFIG_X86_32
254 data = (void **)__pa_nodebug(&ucode_cpio.data);
255 size = (size_t *)__pa_nodebug(&ucode_cpio.size);
256#else
257 data = &ucode_cpio.data;
258 size = &ucode_cpio.size;
259#endif
260
261 cp = find_ucode_in_initrd();
262 if (!cp.data) {
263 if (!load_builtin_amd_microcode(&cp, family))
264 return;
265 }
266
267 *data = cp.data;
268 *size = cp.size;
269
270 apply_ucode_in_initrd(cp.data, cp.size, true);
271}
272
273#ifdef CONFIG_X86_32
274/*
275 * On 32-bit, since AP's early load occurs before paging is turned on, we
276 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
277 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
278 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
279 * which is used upon resume from suspend.
280 */
281void load_ucode_amd_ap(void)
282{
283 struct microcode_amd *mc;
284 size_t *usize;
285 void **ucode;
286
287 mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
288 if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
289 __apply_microcode_amd(mc);
290 return;
291 }
292
293 ucode = (void *)__pa_nodebug(&container);
294 usize = (size_t *)__pa_nodebug(&container_size);
295
296 if (!*ucode || !*usize)
297 return;
298
299 apply_ucode_in_initrd(*ucode, *usize, false);
300}
301
302static void __init collect_cpu_sig_on_bsp(void *arg)
303{
304 unsigned int cpu = smp_processor_id();
305 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
306
307 uci->cpu_sig.sig = cpuid_eax(0x00000001);
308}
309
310static void __init get_bsp_sig(void)
311{
312 unsigned int bsp = boot_cpu_data.cpu_index;
313 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
314
315 if (!uci->cpu_sig.sig)
316 smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
317}
318#else
319void load_ucode_amd_ap(void)
320{
321 unsigned int cpu = smp_processor_id();
322 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
323 struct equiv_cpu_entry *eq;
324 struct microcode_amd *mc;
325 u32 rev, eax;
326 u16 eq_id;
327
328 /* Exit if called on the BSP. */
329 if (!cpu)
330 return;
331
332 if (!container)
333 return;
334
335 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
336
337 uci->cpu_sig.rev = rev;
338 uci->cpu_sig.sig = eax;
339
340 eax = cpuid_eax(0x00000001);
341 eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
342
343 eq_id = find_equiv_id(eq, eax);
344 if (!eq_id)
345 return;
346
347 if (eq_id == this_equiv_id) {
348 mc = (struct microcode_amd *)amd_ucode_patch;
349
350 if (mc && rev < mc->hdr.patch_id) {
351 if (!__apply_microcode_amd(mc))
352 ucode_new_rev = mc->hdr.patch_id;
353 }
354
355 } else {
356 if (!ucode_cpio.data)
357 return;
358
359 /*
360 * AP has a different equivalence ID than BSP, looks like
361 * mixed-steppings silicon so go through the ucode blob anew.
362 */
363 apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
364 }
365}
366#endif
367
368int __init save_microcode_in_initrd_amd(void)
369{
370 unsigned long cont;
371 int retval = 0;
372 enum ucode_state ret;
373 u8 *cont_va;
374 u32 eax;
375
376 if (!container)
377 return -EINVAL;
378
379#ifdef CONFIG_X86_32
380 get_bsp_sig();
381 cont = (unsigned long)container;
382 cont_va = __va(container);
383#else
384 /*
385 * We need the physical address of the container for both bitness since
386 * boot_params.hdr.ramdisk_image is a physical address.
387 */
388 cont = __pa(container);
389 cont_va = container;
390#endif
391
392 /*
393 * Take into account the fact that the ramdisk might get relocated and
394 * therefore we need to recompute the container's position in virtual
395 * memory space.
396 */
397 if (relocated_ramdisk)
398 container = (u8 *)(__va(relocated_ramdisk) +
399 (cont - boot_params.hdr.ramdisk_image));
400 else
401 container = cont_va;
402
403 if (ucode_new_rev)
404 pr_info("microcode: updated early to new patch_level=0x%08x\n",
405 ucode_new_rev);
406
407 eax = cpuid_eax(0x00000001);
408 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
409
410 ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
411 if (ret != UCODE_OK)
412 retval = -EINVAL;
413
414 /*
415 * This will be freed any msec now, stash patches for the current
416 * family and switch to patch cache for cpu hotplug, etc later.
417 */
418 container = NULL;
419 container_size = 0;
420
421 return retval;
422}
423
424void reload_ucode_amd(void)
425{
426 struct microcode_amd *mc;
427 u32 rev, eax;
428
429 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
430
431 mc = (struct microcode_amd *)amd_ucode_patch;
432
433 if (mc && rev < mc->hdr.patch_id) {
434 if (!__apply_microcode_amd(mc)) {
435 ucode_new_rev = mc->hdr.patch_id;
436 pr_info("microcode: reload patch_level=0x%08x\n",
437 ucode_new_rev);
438 }
439 }
440}
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 9e3f3c7dd5d7..7fc27f1cca58 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -5,6 +5,12 @@
5 * 2006 Shaohua Li <shaohua.li@intel.com> 5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 * 2013-2015 Borislav Petkov <bp@alien8.de> 6 * 2013-2015 Borislav Petkov <bp@alien8.de>
7 * 7 *
8 * X86 CPU microcode early update for Linux:
9 *
10 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
11 * H Peter Anvin" <hpa@zytor.com>
12 * (C) 2015 Borislav Petkov <bp@alien8.de>
13 *
8 * This driver allows to upgrade microcode on x86 processors. 14 * This driver allows to upgrade microcode on x86 processors.
9 * 15 *
10 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
@@ -13,34 +19,39 @@
13 * 2 of the License, or (at your option) any later version. 19 * 2 of the License, or (at your option) any later version.
14 */ 20 */
15 21
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) "microcode: " fmt
17 23
18#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/syscore_ops.h>
19#include <linux/miscdevice.h> 26#include <linux/miscdevice.h>
20#include <linux/capability.h> 27#include <linux/capability.h>
28#include <linux/firmware.h>
21#include <linux/kernel.h> 29#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/mutex.h> 30#include <linux/mutex.h>
24#include <linux/cpu.h> 31#include <linux/cpu.h>
25#include <linux/fs.h> 32#include <linux/fs.h>
26#include <linux/mm.h> 33#include <linux/mm.h>
27#include <linux/syscore_ops.h>
28 34
29#include <asm/microcode.h> 35#include <asm/microcode_intel.h>
30#include <asm/processor.h>
31#include <asm/cpu_device_id.h> 36#include <asm/cpu_device_id.h>
37#include <asm/microcode_amd.h>
32#include <asm/perf_event.h> 38#include <asm/perf_event.h>
39#include <asm/microcode.h>
40#include <asm/processor.h>
41#include <asm/cmdline.h>
33 42
34MODULE_DESCRIPTION("Microcode Update Driver"); 43#define MICROCODE_VERSION "2.01"
35MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
36MODULE_LICENSE("GPL");
37
38#define MICROCODE_VERSION "2.00"
39 44
40static struct microcode_ops *microcode_ops; 45static struct microcode_ops *microcode_ops;
41 46
42bool dis_ucode_ldr; 47static bool dis_ucode_ldr;
43module_param(dis_ucode_ldr, bool, 0); 48
49static int __init disable_loader(char *str)
50{
51 dis_ucode_ldr = true;
52 return 1;
53}
54__setup("dis_ucode_ldr", disable_loader);
44 55
45/* 56/*
46 * Synchronization. 57 * Synchronization.
@@ -68,6 +79,150 @@ struct cpu_info_ctx {
68 int err; 79 int err;
69}; 80};
70 81
82static bool __init check_loader_disabled_bsp(void)
83{
84#ifdef CONFIG_X86_32
85 const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
86 const char *opt = "dis_ucode_ldr";
87 const char *option = (const char *)__pa_nodebug(opt);
88 bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
89
90#else /* CONFIG_X86_64 */
91 const char *cmdline = boot_command_line;
92 const char *option = "dis_ucode_ldr";
93 bool *res = &dis_ucode_ldr;
94#endif
95
96 if (cmdline_find_option_bool(cmdline, option))
97 *res = true;
98
99 return *res;
100}
101
102extern struct builtin_fw __start_builtin_fw[];
103extern struct builtin_fw __end_builtin_fw[];
104
105bool get_builtin_firmware(struct cpio_data *cd, const char *name)
106{
107#ifdef CONFIG_FW_LOADER
108 struct builtin_fw *b_fw;
109
110 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
111 if (!strcmp(name, b_fw->name)) {
112 cd->size = b_fw->size;
113 cd->data = b_fw->data;
114 return true;
115 }
116 }
117#endif
118 return false;
119}
120
121void __init load_ucode_bsp(void)
122{
123 int vendor;
124 unsigned int family;
125
126 if (check_loader_disabled_bsp())
127 return;
128
129 if (!have_cpuid_p())
130 return;
131
132 vendor = x86_vendor();
133 family = x86_family();
134
135 switch (vendor) {
136 case X86_VENDOR_INTEL:
137 if (family >= 6)
138 load_ucode_intel_bsp();
139 break;
140 case X86_VENDOR_AMD:
141 if (family >= 0x10)
142 load_ucode_amd_bsp(family);
143 break;
144 default:
145 break;
146 }
147}
148
149static bool check_loader_disabled_ap(void)
150{
151#ifdef CONFIG_X86_32
152 return *((bool *)__pa_nodebug(&dis_ucode_ldr));
153#else
154 return dis_ucode_ldr;
155#endif
156}
157
158void load_ucode_ap(void)
159{
160 int vendor, family;
161
162 if (check_loader_disabled_ap())
163 return;
164
165 if (!have_cpuid_p())
166 return;
167
168 vendor = x86_vendor();
169 family = x86_family();
170
171 switch (vendor) {
172 case X86_VENDOR_INTEL:
173 if (family >= 6)
174 load_ucode_intel_ap();
175 break;
176 case X86_VENDOR_AMD:
177 if (family >= 0x10)
178 load_ucode_amd_ap();
179 break;
180 default:
181 break;
182 }
183}
184
185int __init save_microcode_in_initrd(void)
186{
187 struct cpuinfo_x86 *c = &boot_cpu_data;
188
189 switch (c->x86_vendor) {
190 case X86_VENDOR_INTEL:
191 if (c->x86 >= 6)
192 save_microcode_in_initrd_intel();
193 break;
194 case X86_VENDOR_AMD:
195 if (c->x86 >= 0x10)
196 save_microcode_in_initrd_amd();
197 break;
198 default:
199 break;
200 }
201
202 return 0;
203}
204
205void reload_early_microcode(void)
206{
207 int vendor, family;
208
209 vendor = x86_vendor();
210 family = x86_family();
211
212 switch (vendor) {
213 case X86_VENDOR_INTEL:
214 if (family >= 6)
215 reload_ucode_intel();
216 break;
217 case X86_VENDOR_AMD:
218 if (family >= 0x10)
219 reload_ucode_amd();
220 break;
221 default:
222 break;
223 }
224}
225
71static void collect_cpu_info_local(void *arg) 226static void collect_cpu_info_local(void *arg)
72{ 227{
73 struct cpu_info_ctx *ctx = arg; 228 struct cpu_info_ctx *ctx = arg;
@@ -210,9 +365,6 @@ static void __exit microcode_dev_exit(void)
210{ 365{
211 misc_deregister(&microcode_dev); 366 misc_deregister(&microcode_dev);
212} 367}
213
214MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
215MODULE_ALIAS("devname:cpu/microcode");
216#else 368#else
217#define microcode_dev_init() 0 369#define microcode_dev_init() 0
218#define microcode_dev_exit() do { } while (0) 370#define microcode_dev_exit() do { } while (0)
@@ -463,20 +615,6 @@ static struct notifier_block mc_cpu_notifier = {
463 .notifier_call = mc_cpu_callback, 615 .notifier_call = mc_cpu_callback,
464}; 616};
465 617
466#ifdef MODULE
467/* Autoload on Intel and AMD systems */
468static const struct x86_cpu_id __initconst microcode_id[] = {
469#ifdef CONFIG_MICROCODE_INTEL
470 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, },
471#endif
472#ifdef CONFIG_MICROCODE_AMD
473 { X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, },
474#endif
475 {}
476};
477MODULE_DEVICE_TABLE(x86cpu, microcode_id);
478#endif
479
480static struct attribute *cpu_root_microcode_attrs[] = { 618static struct attribute *cpu_root_microcode_attrs[] = {
481 &dev_attr_reload.attr, 619 &dev_attr_reload.attr,
482 NULL 620 NULL
@@ -487,9 +625,9 @@ static struct attribute_group cpu_root_microcode_group = {
487 .attrs = cpu_root_microcode_attrs, 625 .attrs = cpu_root_microcode_attrs,
488}; 626};
489 627
490static int __init microcode_init(void) 628int __init microcode_init(void)
491{ 629{
492 struct cpuinfo_x86 *c = &cpu_data(0); 630 struct cpuinfo_x86 *c = &boot_cpu_data;
493 int error; 631 int error;
494 632
495 if (paravirt_enabled() || dis_ucode_ldr) 633 if (paravirt_enabled() || dis_ucode_ldr)
@@ -560,35 +698,3 @@ static int __init microcode_init(void)
560 return error; 698 return error;
561 699
562} 700}
563module_init(microcode_init);
564
565static void __exit microcode_exit(void)
566{
567 struct cpuinfo_x86 *c = &cpu_data(0);
568
569 microcode_dev_exit();
570
571 unregister_hotcpu_notifier(&mc_cpu_notifier);
572 unregister_syscore_ops(&mc_syscore_ops);
573
574 sysfs_remove_group(&cpu_subsys.dev_root->kobj,
575 &cpu_root_microcode_group);
576
577 get_online_cpus();
578 mutex_lock(&microcode_mutex);
579
580 subsys_interface_unregister(&mc_cpu_interface);
581
582 mutex_unlock(&microcode_mutex);
583 put_online_cpus();
584
585 platform_device_unregister(microcode_pdev);
586
587 microcode_ops = NULL;
588
589 if (c->x86_vendor == X86_VENDOR_AMD)
590 exit_amd_microcode();
591
592 pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n");
593}
594module_exit(microcode_exit);
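
check_loader_disabled_bsp() added above scans the raw boot command line for the dis_ucode_ldr token, going through __pa_nodebug() on 32-bit because the scan runs before paging is fully set up and only physical addresses are usable. The following is a rough userspace approximation of a whole-word boolean option lookup; it is a simplified stand-in, not the kernel's cmdline_find_option_bool().

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Rough approximation of a boolean command-line option lookup: the token
 * must match a whole word, delimited by spaces or the ends of the string.
 */
static bool find_option_bool(const char *cmdline, const char *opt)
{
	size_t len = strlen(opt);
	const char *p = cmdline;

	while ((p = strstr(p, opt)) != NULL) {
		bool starts = (p == cmdline) || (p[-1] == ' ');
		bool ends   = (p[len] == '\0') || (p[len] == ' ');

		if (starts && ends)
			return true;
		p += len;
	}
	return false;
}

int main(void)
{
	const char *cmdline = "root=/dev/sda1 ro dis_ucode_ldr quiet";

	printf("loader disabled: %d\n", find_option_bool(cmdline, "dis_ucode_ldr"));
	return 0;
}

Anything fancier, such as key=value options, is out of scope for this sketch.
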
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
deleted file mode 100644
index 8ebc421d6299..000000000000
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * X86 CPU microcode early update for Linux
3 *
4 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
5 * H Peter Anvin <hpa@zytor.com>
6 * (C) 2015 Borislav Petkov <bp@alien8.de>
7 *
8 * This driver allows to early upgrade microcode on Intel processors
9 * belonging to IA-32 family - PentiumPro, Pentium II,
10 * Pentium III, Xeon, Pentium 4, etc.
11 *
12 * Reference: Section 9.11 of Volume 3, IA-32 Intel Architecture
13 * Software Developer's Manual.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20#include <linux/module.h>
21#include <linux/firmware.h>
22#include <asm/microcode.h>
23#include <asm/microcode_intel.h>
24#include <asm/microcode_amd.h>
25#include <asm/processor.h>
26#include <asm/cmdline.h>
27
28static bool __init check_loader_disabled_bsp(void)
29{
30#ifdef CONFIG_X86_32
31 const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
32 const char *opt = "dis_ucode_ldr";
33 const char *option = (const char *)__pa_nodebug(opt);
34 bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
35
36#else /* CONFIG_X86_64 */
37 const char *cmdline = boot_command_line;
38 const char *option = "dis_ucode_ldr";
39 bool *res = &dis_ucode_ldr;
40#endif
41
42 if (cmdline_find_option_bool(cmdline, option))
43 *res = true;
44
45 return *res;
46}
47
48extern struct builtin_fw __start_builtin_fw[];
49extern struct builtin_fw __end_builtin_fw[];
50
51bool get_builtin_firmware(struct cpio_data *cd, const char *name)
52{
53#ifdef CONFIG_FW_LOADER
54 struct builtin_fw *b_fw;
55
56 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
57 if (!strcmp(name, b_fw->name)) {
58 cd->size = b_fw->size;
59 cd->data = b_fw->data;
60 return true;
61 }
62 }
63#endif
64 return false;
65}
66
67void __init load_ucode_bsp(void)
68{
69 int vendor;
70 unsigned int family;
71
72 if (check_loader_disabled_bsp())
73 return;
74
75 if (!have_cpuid_p())
76 return;
77
78 vendor = x86_vendor();
79 family = x86_family();
80
81 switch (vendor) {
82 case X86_VENDOR_INTEL:
83 if (family >= 6)
84 load_ucode_intel_bsp();
85 break;
86 case X86_VENDOR_AMD:
87 if (family >= 0x10)
88 load_ucode_amd_bsp(family);
89 break;
90 default:
91 break;
92 }
93}
94
95static bool check_loader_disabled_ap(void)
96{
97#ifdef CONFIG_X86_32
98 return *((bool *)__pa_nodebug(&dis_ucode_ldr));
99#else
100 return dis_ucode_ldr;
101#endif
102}
103
104void load_ucode_ap(void)
105{
106 int vendor, family;
107
108 if (check_loader_disabled_ap())
109 return;
110
111 if (!have_cpuid_p())
112 return;
113
114 vendor = x86_vendor();
115 family = x86_family();
116
117 switch (vendor) {
118 case X86_VENDOR_INTEL:
119 if (family >= 6)
120 load_ucode_intel_ap();
121 break;
122 case X86_VENDOR_AMD:
123 if (family >= 0x10)
124 load_ucode_amd_ap();
125 break;
126 default:
127 break;
128 }
129}
130
131int __init save_microcode_in_initrd(void)
132{
133 struct cpuinfo_x86 *c = &boot_cpu_data;
134
135 switch (c->x86_vendor) {
136 case X86_VENDOR_INTEL:
137 if (c->x86 >= 6)
138 save_microcode_in_initrd_intel();
139 break;
140 case X86_VENDOR_AMD:
141 if (c->x86 >= 0x10)
142 save_microcode_in_initrd_amd();
143 break;
144 default:
145 break;
146 }
147
148 return 0;
149}
150
151void reload_early_microcode(void)
152{
153 int vendor, family;
154
155 vendor = x86_vendor();
156 family = x86_family();
157
158 switch (vendor) {
159 case X86_VENDOR_INTEL:
160 if (family >= 6)
161 reload_ucode_intel();
162 break;
163 case X86_VENDOR_AMD:
164 if (family >= 0x10)
165 reload_ucode_amd();
166 break;
167 default:
168 break;
169 }
170}
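
The vendor dispatch deleted here survives unchanged in core.c: Intel loading is attempted only for family 6 and later, AMD loading only for family 0x10 and later. A small self-contained sketch of that routing follows, with the per-vendor loaders reduced to hypothetical print stubs.

#include <stdio.h>

enum x86_vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

/* stand-ins for the per-vendor loaders */
static void load_intel(void) { puts("loading Intel microcode"); }
static void load_amd(unsigned int family) { printf("loading AMD microcode, family 0x%x\n", family); }

/*
 * Mirrors the gate in load_ucode_bsp()/load_ucode_ap(): Intel needs
 * family >= 6 (P6 and later), AMD needs family >= 0x10.
 */
static void load_ucode(enum x86_vendor vendor, unsigned int family)
{
	switch (vendor) {
	case VENDOR_INTEL:
		if (family >= 6)
			load_intel();
		break;
	case VENDOR_AMD:
		if (family >= 0x10)
			load_amd(family);
		break;
	default:
		break;
	}
}

int main(void)
{
	load_ucode(VENDOR_INTEL, 6);
	load_ucode(VENDOR_AMD, 0x15);
	load_ucode(VENDOR_AMD, 0x0f);	/* too old, silently skipped */
	return 0;
}
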
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 969dc17eb1b4..ce47402eb2f9 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -4,27 +4,804 @@
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk> 4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com> 5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 * 6 *
7 * Intel CPU microcode early update for Linux
8 *
9 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
10 * H Peter Anvin <hpa@zytor.com>
11 *
7 * This program is free software; you can redistribute it and/or 12 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 13 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 14 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
11 */ 16 */
12 17
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18/*
19 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
20 * printk calls into no_printk().
21 *
22 *#define DEBUG
23 */
24#define pr_fmt(fmt) "microcode: " fmt
14 25
26#include <linux/earlycpio.h>
15#include <linux/firmware.h> 27#include <linux/firmware.h>
16#include <linux/uaccess.h> 28#include <linux/uaccess.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
30#include <linux/initrd.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/mm.h>
20 35
21#include <asm/microcode_intel.h> 36#include <asm/microcode_intel.h>
22#include <asm/processor.h> 37#include <asm/processor.h>
38#include <asm/tlbflush.h>
39#include <asm/setup.h>
23#include <asm/msr.h> 40#include <asm/msr.h>
24 41
25MODULE_DESCRIPTION("Microcode Update Driver"); 42static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
26MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); 43static struct mc_saved_data {
27MODULE_LICENSE("GPL"); 44 unsigned int mc_saved_count;
45 struct microcode_intel **mc_saved;
46} mc_saved_data;
47
48static enum ucode_state
49load_microcode_early(struct microcode_intel **saved,
50 unsigned int num_saved, struct ucode_cpu_info *uci)
51{
52 struct microcode_intel *ucode_ptr, *new_mc = NULL;
53 struct microcode_header_intel *mc_hdr;
54 int new_rev, ret, i;
55
56 new_rev = uci->cpu_sig.rev;
57
58 for (i = 0; i < num_saved; i++) {
59 ucode_ptr = saved[i];
60 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
61
62 ret = has_newer_microcode(ucode_ptr,
63 uci->cpu_sig.sig,
64 uci->cpu_sig.pf,
65 new_rev);
66 if (!ret)
67 continue;
68
69 new_rev = mc_hdr->rev;
70 new_mc = ucode_ptr;
71 }
72
73 if (!new_mc)
74 return UCODE_NFOUND;
75
76 uci->mc = (struct microcode_intel *)new_mc;
77 return UCODE_OK;
78}
79
80static inline void
81copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
82 unsigned long off, int num_saved)
83{
84 int i;
85
86 for (i = 0; i < num_saved; i++)
87 mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
88}
89
90#ifdef CONFIG_X86_32
91static void
92microcode_phys(struct microcode_intel **mc_saved_tmp,
93 struct mc_saved_data *mc_saved_data)
94{
95 int i;
96 struct microcode_intel ***mc_saved;
97
98 mc_saved = (struct microcode_intel ***)
99 __pa_nodebug(&mc_saved_data->mc_saved);
100 for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
101 struct microcode_intel *p;
102
103 p = *(struct microcode_intel **)
104 __pa_nodebug(mc_saved_data->mc_saved + i);
105 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
106 }
107}
108#endif
109
110static enum ucode_state
111load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
112 unsigned long initrd_start, struct ucode_cpu_info *uci)
113{
114 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
115 unsigned int count = mc_saved_data->mc_saved_count;
116
117 if (!mc_saved_data->mc_saved) {
118 copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);
119
120 return load_microcode_early(mc_saved_tmp, count, uci);
121 } else {
122#ifdef CONFIG_X86_32
123 microcode_phys(mc_saved_tmp, mc_saved_data);
124 return load_microcode_early(mc_saved_tmp, count, uci);
125#else
126 return load_microcode_early(mc_saved_data->mc_saved,
127 count, uci);
128#endif
129 }
130}
131
132/*
133 * Given CPU signature and a microcode patch, this function finds if the
134 * microcode patch has matching family and model with the CPU.
135 */
136static enum ucode_state
137matching_model_microcode(struct microcode_header_intel *mc_header,
138 unsigned long sig)
139{
140 unsigned int fam, model;
141 unsigned int fam_ucode, model_ucode;
142 struct extended_sigtable *ext_header;
143 unsigned long total_size = get_totalsize(mc_header);
144 unsigned long data_size = get_datasize(mc_header);
145 int ext_sigcount, i;
146 struct extended_signature *ext_sig;
147
148 fam = __x86_family(sig);
149 model = x86_model(sig);
150
151 fam_ucode = __x86_family(mc_header->sig);
152 model_ucode = x86_model(mc_header->sig);
153
154 if (fam == fam_ucode && model == model_ucode)
155 return UCODE_OK;
156
157 /* Look for ext. headers: */
158 if (total_size <= data_size + MC_HEADER_SIZE)
159 return UCODE_NFOUND;
160
161 ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
162 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
163 ext_sigcount = ext_header->count;
164
165 for (i = 0; i < ext_sigcount; i++) {
166 fam_ucode = __x86_family(ext_sig->sig);
167 model_ucode = x86_model(ext_sig->sig);
168
169 if (fam == fam_ucode && model == model_ucode)
170 return UCODE_OK;
171
172 ext_sig++;
173 }
174 return UCODE_NFOUND;
175}
176
177static int
178save_microcode(struct mc_saved_data *mc_saved_data,
179 struct microcode_intel **mc_saved_src,
180 unsigned int mc_saved_count)
181{
182 int i, j;
183 struct microcode_intel **saved_ptr;
184 int ret;
185
186 if (!mc_saved_count)
187 return -EINVAL;
188
189 /*
190 * Copy new microcode data.
191 */
192 saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
193 if (!saved_ptr)
194 return -ENOMEM;
195
196 for (i = 0; i < mc_saved_count; i++) {
197 struct microcode_header_intel *mc_hdr;
198 struct microcode_intel *mc;
199 unsigned long size;
200
201 if (!mc_saved_src[i]) {
202 ret = -EINVAL;
203 goto err;
204 }
205
206 mc = mc_saved_src[i];
207 mc_hdr = &mc->hdr;
208 size = get_totalsize(mc_hdr);
209
210 saved_ptr[i] = kmalloc(size, GFP_KERNEL);
211 if (!saved_ptr[i]) {
212 ret = -ENOMEM;
213 goto err;
214 }
215
216 memcpy(saved_ptr[i], mc, size);
217 }
218
219 /*
220 * Point to newly saved microcode.
221 */
222 mc_saved_data->mc_saved = saved_ptr;
223 mc_saved_data->mc_saved_count = mc_saved_count;
224
225 return 0;
226
227err:
228 for (j = 0; j <= i; j++)
229 kfree(saved_ptr[j]);
230 kfree(saved_ptr);
231
232 return ret;
233}
234
235/*
236 * A microcode patch in ucode_ptr is saved into mc_saved
237 * - if it has matching signature and newer revision compared to an existing
238 * patch mc_saved.
239 * - or if it is a newly discovered microcode patch.
240 *
241 * The microcode patch should have matching model with CPU.
242 *
243 * Returns: The updated number @num_saved of saved microcode patches.
244 */
245static unsigned int _save_mc(struct microcode_intel **mc_saved,
246 u8 *ucode_ptr, unsigned int num_saved)
247{
248 struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
249 unsigned int sig, pf;
250 int found = 0, i;
251
252 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
253
254 for (i = 0; i < num_saved; i++) {
255 mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
256 sig = mc_saved_hdr->sig;
257 pf = mc_saved_hdr->pf;
258
259 if (!find_matching_signature(ucode_ptr, sig, pf))
260 continue;
261
262 found = 1;
263
264 if (mc_hdr->rev <= mc_saved_hdr->rev)
265 continue;
266
267 /*
268 * Found an older ucode saved earlier. Replace it with
269 * this newer one.
270 */
271 mc_saved[i] = (struct microcode_intel *)ucode_ptr;
272 break;
273 }
274
275 /* Newly detected microcode, save it to memory. */
276 if (i >= num_saved && !found)
277 mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
278
279 return num_saved;
280}
281
282/*
283 * Get microcode matching with BSP's model. Only CPUs with the same model as
284 * BSP can stay in the platform.
285 */
286static enum ucode_state __init
287get_matching_model_microcode(int cpu, unsigned long start,
288 void *data, size_t size,
289 struct mc_saved_data *mc_saved_data,
290 unsigned long *mc_saved_in_initrd,
291 struct ucode_cpu_info *uci)
292{
293 u8 *ucode_ptr = data;
294 unsigned int leftover = size;
295 enum ucode_state state = UCODE_OK;
296 unsigned int mc_size;
297 struct microcode_header_intel *mc_header;
298 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
299 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
300 int i;
301
302 while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
303
304 if (leftover < sizeof(mc_header))
305 break;
306
307 mc_header = (struct microcode_header_intel *)ucode_ptr;
308
309 mc_size = get_totalsize(mc_header);
310 if (!mc_size || mc_size > leftover ||
311 microcode_sanity_check(ucode_ptr, 0) < 0)
312 break;
313
314 leftover -= mc_size;
315
316 /*
317 * Since APs with same family and model as the BSP may boot in
318 * the platform, we need to find and save microcode patches
319 * with the same family and model as the BSP.
320 */
321 if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
322 UCODE_OK) {
323 ucode_ptr += mc_size;
324 continue;
325 }
326
327 mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
328
329 ucode_ptr += mc_size;
330 }
331
332 if (leftover) {
333 state = UCODE_ERROR;
334 goto out;
335 }
336
337 if (mc_saved_count == 0) {
338 state = UCODE_NFOUND;
339 goto out;
340 }
341
342 for (i = 0; i < mc_saved_count; i++)
343 mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
344
345 mc_saved_data->mc_saved_count = mc_saved_count;
346out:
347 return state;
348}
349
350static int collect_cpu_info_early(struct ucode_cpu_info *uci)
351{
352 unsigned int val[2];
353 unsigned int family, model;
354 struct cpu_signature csig;
355 unsigned int eax, ebx, ecx, edx;
356
357 csig.sig = 0;
358 csig.pf = 0;
359 csig.rev = 0;
360
361 memset(uci, 0, sizeof(*uci));
362
363 eax = 0x00000001;
364 ecx = 0;
365 native_cpuid(&eax, &ebx, &ecx, &edx);
366 csig.sig = eax;
367
368 family = __x86_family(csig.sig);
369 model = x86_model(csig.sig);
370
371 if ((model >= 5) || (family > 6)) {
372 /* get processor flags from MSR 0x17 */
373 native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
374 csig.pf = 1 << ((val[1] >> 18) & 7);
375 }
376 native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
377
378 /* As documented in the SDM: Do a CPUID 1 here */
379 sync_core();
380
381 /* get the current revision from MSR 0x8B */
382 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
383
384 csig.rev = val[1];
385
386 uci->cpu_sig = csig;
387 uci->valid = 1;
388
389 return 0;
390}
391
392static void show_saved_mc(void)
393{
394#ifdef DEBUG
395 int i, j;
396 unsigned int sig, pf, rev, total_size, data_size, date;
397 struct ucode_cpu_info uci;
398
399 if (mc_saved_data.mc_saved_count == 0) {
400 pr_debug("no microcode data saved.\n");
401 return;
402 }
403 pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);
404
405 collect_cpu_info_early(&uci);
406
407 sig = uci.cpu_sig.sig;
408 pf = uci.cpu_sig.pf;
409 rev = uci.cpu_sig.rev;
410 pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
411
412 for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
413 struct microcode_header_intel *mc_saved_header;
414 struct extended_sigtable *ext_header;
415 int ext_sigcount;
416 struct extended_signature *ext_sig;
417
418 mc_saved_header = (struct microcode_header_intel *)
419 mc_saved_data.mc_saved[i];
420 sig = mc_saved_header->sig;
421 pf = mc_saved_header->pf;
422 rev = mc_saved_header->rev;
423 total_size = get_totalsize(mc_saved_header);
424 data_size = get_datasize(mc_saved_header);
425 date = mc_saved_header->date;
426
427		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
428 i, sig, pf, rev, total_size,
429 date & 0xffff,
430 date >> 24,
431 (date >> 16) & 0xff);
432
433 /* Look for ext. headers: */
434 if (total_size <= data_size + MC_HEADER_SIZE)
435 continue;
436
437 ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
438 ext_sigcount = ext_header->count;
439 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
440
441 for (j = 0; j < ext_sigcount; j++) {
442 sig = ext_sig->sig;
443 pf = ext_sig->pf;
444
445 pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
446 j, sig, pf);
447
448 ext_sig++;
449 }
450
451 }
452#endif
453}
454
455#ifdef CONFIG_HOTPLUG_CPU
456static DEFINE_MUTEX(x86_cpu_microcode_mutex);
457/*
458 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
459 * hot added or resumes.
460 *
461 * Please make sure this mc is a valid microcode patch before calling
462 * this function.
463 */
464int save_mc_for_early(u8 *mc)
465{
466 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
467 unsigned int mc_saved_count_init;
468 unsigned int mc_saved_count;
469 struct microcode_intel **mc_saved;
470 int ret = 0;
471 int i;
472
473 /*
474 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
475 * hotplug.
476 */
477 mutex_lock(&x86_cpu_microcode_mutex);
478
479 mc_saved_count_init = mc_saved_data.mc_saved_count;
480 mc_saved_count = mc_saved_data.mc_saved_count;
481 mc_saved = mc_saved_data.mc_saved;
482
483 if (mc_saved && mc_saved_count)
484 memcpy(mc_saved_tmp, mc_saved,
485 mc_saved_count * sizeof(struct microcode_intel *));
486 /*
487 * Save the microcode patch mc in mc_save_tmp structure if it's a newer
488 * version.
489 */
490 mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);
491
492 /*
493 * Save the mc_save_tmp in global mc_saved_data.
494 */
495 ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
496 if (ret) {
497 pr_err("Cannot save microcode patch.\n");
498 goto out;
499 }
500
501 show_saved_mc();
502
503 /*
504 * Free old saved microcode data.
505 */
506 if (mc_saved) {
507 for (i = 0; i < mc_saved_count_init; i++)
508 kfree(mc_saved[i]);
509 kfree(mc_saved);
510 }
511
512out:
513 mutex_unlock(&x86_cpu_microcode_mutex);
514
515 return ret;
516}
517EXPORT_SYMBOL_GPL(save_mc_for_early);
518#endif
519
520static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
521{
522#ifdef CONFIG_X86_64
523 unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
524 unsigned int family, model, stepping;
525 char name[30];
526
527 native_cpuid(&eax, &ebx, &ecx, &edx);
528
529 family = __x86_family(eax);
530 model = x86_model(eax);
531 stepping = eax & 0xf;
532
533 sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
534
535 return get_builtin_firmware(cp, name);
536#else
537 return false;
538#endif
539}
540
541static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
542static __init enum ucode_state
543scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
544 unsigned long start, unsigned long size,
545 struct ucode_cpu_info *uci)
546{
547 struct cpio_data cd;
548 long offset = 0;
549#ifdef CONFIG_X86_32
550 char *p = (char *)__pa_nodebug(ucode_name);
551#else
552 char *p = ucode_name;
553#endif
554
555 cd.data = NULL;
556 cd.size = 0;
557
558 cd = find_cpio_data(p, (void *)start, size, &offset);
559 if (!cd.data) {
560 if (!load_builtin_intel_microcode(&cd))
561 return UCODE_ERROR;
562 }
563
564 return get_matching_model_microcode(0, start, cd.data, cd.size,
565 mc_saved_data, initrd, uci);
566}
567
568/*
569 * Print ucode update info.
570 */
571static void
572print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
573{
574 int cpu = smp_processor_id();
575
576 pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
577 cpu,
578 uci->cpu_sig.rev,
579 date & 0xffff,
580 date >> 24,
581 (date >> 16) & 0xff);
582}
583
584#ifdef CONFIG_X86_32
585
586static int delay_ucode_info;
587static int current_mc_date;
588
589/*
590 * Print early updated ucode info after printk works. This is delayed info dump.
591 */
592void show_ucode_info_early(void)
593{
594 struct ucode_cpu_info uci;
595
596 if (delay_ucode_info) {
597 collect_cpu_info_early(&uci);
598 print_ucode_info(&uci, current_mc_date);
599 delay_ucode_info = 0;
600 }
601}
602
603/*
604 * At this point, we can not call printk() yet. Keep microcode patch number in
605 * mc_saved_data.mc_saved and delay printing microcode info in
606 * show_ucode_info_early() until printk() works.
607 */
608static void print_ucode(struct ucode_cpu_info *uci)
609{
610 struct microcode_intel *mc_intel;
611 int *delay_ucode_info_p;
612 int *current_mc_date_p;
613
614 mc_intel = uci->mc;
615 if (mc_intel == NULL)
616 return;
617
618 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
619 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
620
621 *delay_ucode_info_p = 1;
622 *current_mc_date_p = mc_intel->hdr.date;
623}
624#else
625
626/*
627 * Flush global tlb. We only do this in x86_64 where paging has been enabled
628 * already and PGE should be enabled as well.
629 */
630static inline void flush_tlb_early(void)
631{
632 __native_flush_tlb_global_irq_disabled();
633}
634
635static inline void print_ucode(struct ucode_cpu_info *uci)
636{
637 struct microcode_intel *mc_intel;
638
639 mc_intel = uci->mc;
640 if (mc_intel == NULL)
641 return;
642
643 print_ucode_info(uci, mc_intel->hdr.date);
644}
645#endif
646
647static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
648{
649 struct microcode_intel *mc_intel;
650 unsigned int val[2];
651
652 mc_intel = uci->mc;
653 if (mc_intel == NULL)
654 return 0;
655
656 /* write microcode via MSR 0x79 */
657 native_wrmsr(MSR_IA32_UCODE_WRITE,
658 (unsigned long) mc_intel->bits,
659 (unsigned long) mc_intel->bits >> 16 >> 16);
660 native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
661
662 /* As documented in the SDM: Do a CPUID 1 here */
663 sync_core();
664
665 /* get the current revision from MSR 0x8B */
666 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
667 if (val[1] != mc_intel->hdr.rev)
668 return -1;
669
670#ifdef CONFIG_X86_64
671 /* Flush global tlb. This is precaution. */
672 flush_tlb_early();
673#endif
674 uci->cpu_sig.rev = val[1];
675
676 if (early)
677 print_ucode(uci);
678 else
679 print_ucode_info(uci, mc_intel->hdr.date);
680
681 return 0;
682}
683
684/*
685 * This function converts microcode patch offsets previously stored in
686 * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
687 */
688int __init save_microcode_in_initrd_intel(void)
689{
690 unsigned int count = mc_saved_data.mc_saved_count;
691 struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
692 int ret = 0;
693
694 if (count == 0)
695 return ret;
696
697 copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
698 ret = save_microcode(&mc_saved_data, mc_saved, count);
699 if (ret)
700 pr_err("Cannot save microcode patches from initrd.\n");
701
702 show_saved_mc();
703
704 return ret;
705}
706
707static void __init
708_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
709 unsigned long *initrd,
710 unsigned long start, unsigned long size)
711{
712 struct ucode_cpu_info uci;
713 enum ucode_state ret;
714
715 collect_cpu_info_early(&uci);
716
717 ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
718 if (ret != UCODE_OK)
719 return;
720
721 ret = load_microcode(mc_saved_data, initrd, start, &uci);
722 if (ret != UCODE_OK)
723 return;
724
725 apply_microcode_early(&uci, true);
726}
727
728void __init load_ucode_intel_bsp(void)
729{
730 u64 start, size;
731#ifdef CONFIG_X86_32
732 struct boot_params *p;
733
734 p = (struct boot_params *)__pa_nodebug(&boot_params);
735 start = p->hdr.ramdisk_image;
736 size = p->hdr.ramdisk_size;
737
738 _load_ucode_intel_bsp(
739 (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
740 (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
741 start, size);
742#else
743 start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
744 size = boot_params.hdr.ramdisk_size;
745
746 _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
747#endif
748}
749
750void load_ucode_intel_ap(void)
751{
752 struct mc_saved_data *mc_saved_data_p;
753 struct ucode_cpu_info uci;
754 unsigned long *mc_saved_in_initrd_p;
755 unsigned long initrd_start_addr;
756 enum ucode_state ret;
757#ifdef CONFIG_X86_32
758 unsigned long *initrd_start_p;
759
760 mc_saved_in_initrd_p =
761 (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
762 mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
763 initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
764 initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
765#else
766 mc_saved_data_p = &mc_saved_data;
767 mc_saved_in_initrd_p = mc_saved_in_initrd;
768 initrd_start_addr = initrd_start;
769#endif
770
771 /*
772 * If there is no valid ucode previously saved in memory, no need to
773 * update ucode on this AP.
774 */
775 if (mc_saved_data_p->mc_saved_count == 0)
776 return;
777
778 collect_cpu_info_early(&uci);
779 ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
780 initrd_start_addr, &uci);
781
782 if (ret != UCODE_OK)
783 return;
784
785 apply_microcode_early(&uci, true);
786}
787
788void reload_ucode_intel(void)
789{
790 struct ucode_cpu_info uci;
791 enum ucode_state ret;
792
793 if (!mc_saved_data.mc_saved_count)
794 return;
795
796 collect_cpu_info_early(&uci);
797
798 ret = load_microcode_early(mc_saved_data.mc_saved,
799 mc_saved_data.mc_saved_count, &uci);
800 if (ret != UCODE_OK)
801 return;
802
803 apply_microcode_early(&uci, false);
804}
28 805
29static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) 806static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
30{ 807{
@@ -264,7 +1041,7 @@ static struct microcode_ops microcode_intel_ops = {
264 1041
265struct microcode_ops * __init init_intel_microcode(void) 1042struct microcode_ops * __init init_intel_microcode(void)
266{ 1043{
267 struct cpuinfo_x86 *c = &cpu_data(0); 1044 struct cpuinfo_x86 *c = &boot_cpu_data;
268 1045
269 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || 1046 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
270 cpu_has(c, X86_FEATURE_IA64)) { 1047 cpu_has(c, X86_FEATURE_IA64)) {
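
apply_microcode_early() in the new intel.c follows the sequence the SDM prescribes: write the patch bits via MSR_IA32_UCODE_WRITE, clear MSR_IA32_UCODE_REV, serialize with CPUID(1), then read the revision back and treat any mismatch with the header revision as a rejected update. The sketch below models that check with a toy MSR shim; the helpers are stand-ins, not the native_wrmsr()/native_rdmsr() wrappers.

#include <stdint.h>
#include <stdio.h>

/*
 * Toy MSR model standing in for native_wrmsr()/native_rdmsr(); the real
 * code talks to MSRs 0x79 and 0x8B and issues CPUID(1) in between.
 */
static uint32_t fake_ucode_rev;

static void msr_write_ucode(const uint8_t *bits, uint32_t rev_in_header)
{
	(void)bits;
	fake_ucode_rev = rev_in_header;	/* pretend the CPU accepted the blob */
}

static uint32_t msr_read_ucode_rev(void) { return fake_ucode_rev; }

static int apply_and_verify(const uint8_t *bits, uint32_t hdr_rev)
{
	fake_ucode_rev = 0;		/* clear MSR_IA32_UCODE_REV */
	msr_write_ucode(bits, hdr_rev);	/* MSR_IA32_UCODE_WRITE */
	/* CPUID(1) serializes and latches the new revision on real hardware */
	if (msr_read_ucode_rev() != hdr_rev)
		return -1;		/* the CPU rejected the update */
	return 0;
}

int main(void)
{
	uint8_t blob[16] = { 0 };

	printf("apply: %d\n", apply_and_verify(blob, 0x1f));
	return 0;
}
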
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
deleted file mode 100644
index 37ea89c11520..000000000000
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ /dev/null
@@ -1,808 +0,0 @@
1/*
2 * Intel CPU microcode early update for Linux
3 *
4 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
5 * H Peter Anvin <hpa@zytor.com>
6 *
7 * This allows to early upgrade microcode on Intel processors
8 * belonging to IA-32 family - PentiumPro, Pentium II,
9 * Pentium III, Xeon, Pentium 4, etc.
10 *
11 * Reference: Section 9.11 of Volume 3, IA-32 Intel Architecture
12 * Software Developer's Manual.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20/*
21 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
22 * printk calls into no_printk().
23 *
24 *#define DEBUG
25 */
26
27#include <linux/module.h>
28#include <linux/mm.h>
29#include <linux/slab.h>
30#include <linux/earlycpio.h>
31#include <linux/initrd.h>
32#include <linux/cpu.h>
33#include <asm/msr.h>
34#include <asm/microcode_intel.h>
35#include <asm/processor.h>
36#include <asm/tlbflush.h>
37#include <asm/setup.h>
38
39#undef pr_fmt
40#define pr_fmt(fmt) "microcode: " fmt
41
42static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
43static struct mc_saved_data {
44 unsigned int mc_saved_count;
45 struct microcode_intel **mc_saved;
46} mc_saved_data;
47
48static enum ucode_state
49load_microcode_early(struct microcode_intel **saved,
50 unsigned int num_saved, struct ucode_cpu_info *uci)
51{
52 struct microcode_intel *ucode_ptr, *new_mc = NULL;
53 struct microcode_header_intel *mc_hdr;
54 int new_rev, ret, i;
55
56 new_rev = uci->cpu_sig.rev;
57
58 for (i = 0; i < num_saved; i++) {
59 ucode_ptr = saved[i];
60 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
61
62 ret = has_newer_microcode(ucode_ptr,
63 uci->cpu_sig.sig,
64 uci->cpu_sig.pf,
65 new_rev);
66 if (!ret)
67 continue;
68
69 new_rev = mc_hdr->rev;
70 new_mc = ucode_ptr;
71 }
72
73 if (!new_mc)
74 return UCODE_NFOUND;
75
76 uci->mc = (struct microcode_intel *)new_mc;
77 return UCODE_OK;
78}
79
80static inline void
81copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
82 unsigned long off, int num_saved)
83{
84 int i;
85
86 for (i = 0; i < num_saved; i++)
87 mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
88}
89
90#ifdef CONFIG_X86_32
91static void
92microcode_phys(struct microcode_intel **mc_saved_tmp,
93 struct mc_saved_data *mc_saved_data)
94{
95 int i;
96 struct microcode_intel ***mc_saved;
97
98 mc_saved = (struct microcode_intel ***)
99 __pa_nodebug(&mc_saved_data->mc_saved);
100 for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
101 struct microcode_intel *p;
102
103 p = *(struct microcode_intel **)
104 __pa_nodebug(mc_saved_data->mc_saved + i);
105 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
106 }
107}
108#endif
109
110static enum ucode_state
111load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
112 unsigned long initrd_start, struct ucode_cpu_info *uci)
113{
114 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
115 unsigned int count = mc_saved_data->mc_saved_count;
116
117 if (!mc_saved_data->mc_saved) {
118 copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);
119
120 return load_microcode_early(mc_saved_tmp, count, uci);
121 } else {
122#ifdef CONFIG_X86_32
123 microcode_phys(mc_saved_tmp, mc_saved_data);
124 return load_microcode_early(mc_saved_tmp, count, uci);
125#else
126 return load_microcode_early(mc_saved_data->mc_saved,
127 count, uci);
128#endif
129 }
130}
131
132/*
133 * Given CPU signature and a microcode patch, this function finds if the
134 * microcode patch has matching family and model with the CPU.
135 */
136static enum ucode_state
137matching_model_microcode(struct microcode_header_intel *mc_header,
138 unsigned long sig)
139{
140 unsigned int fam, model;
141 unsigned int fam_ucode, model_ucode;
142 struct extended_sigtable *ext_header;
143 unsigned long total_size = get_totalsize(mc_header);
144 unsigned long data_size = get_datasize(mc_header);
145 int ext_sigcount, i;
146 struct extended_signature *ext_sig;
147
148 fam = __x86_family(sig);
149 model = x86_model(sig);
150
151 fam_ucode = __x86_family(mc_header->sig);
152 model_ucode = x86_model(mc_header->sig);
153
154 if (fam == fam_ucode && model == model_ucode)
155 return UCODE_OK;
156
157 /* Look for ext. headers: */
158 if (total_size <= data_size + MC_HEADER_SIZE)
159 return UCODE_NFOUND;
160
161 ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
162 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
163 ext_sigcount = ext_header->count;
164
165 for (i = 0; i < ext_sigcount; i++) {
166 fam_ucode = __x86_family(ext_sig->sig);
167 model_ucode = x86_model(ext_sig->sig);
168
169 if (fam == fam_ucode && model == model_ucode)
170 return UCODE_OK;
171
172 ext_sig++;
173 }
174 return UCODE_NFOUND;
175}
176
177static int
178save_microcode(struct mc_saved_data *mc_saved_data,
179 struct microcode_intel **mc_saved_src,
180 unsigned int mc_saved_count)
181{
182 int i, j;
183 struct microcode_intel **saved_ptr;
184 int ret;
185
186 if (!mc_saved_count)
187 return -EINVAL;
188
189 /*
190 * Copy new microcode data.
191 */
192 saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
193 if (!saved_ptr)
194 return -ENOMEM;
195
196 for (i = 0; i < mc_saved_count; i++) {
197 struct microcode_header_intel *mc_hdr;
198 struct microcode_intel *mc;
199 unsigned long size;
200
201 if (!mc_saved_src[i]) {
202 ret = -EINVAL;
203 goto err;
204 }
205
206 mc = mc_saved_src[i];
207 mc_hdr = &mc->hdr;
208 size = get_totalsize(mc_hdr);
209
210 saved_ptr[i] = kmalloc(size, GFP_KERNEL);
211 if (!saved_ptr[i]) {
212 ret = -ENOMEM;
213 goto err;
214 }
215
216 memcpy(saved_ptr[i], mc, size);
217 }
218
219 /*
220 * Point to newly saved microcode.
221 */
222 mc_saved_data->mc_saved = saved_ptr;
223 mc_saved_data->mc_saved_count = mc_saved_count;
224
225 return 0;
226
227err:
228 for (j = 0; j <= i; j++)
229 kfree(saved_ptr[j]);
230 kfree(saved_ptr);
231
232 return ret;
233}
234
235/*
236 * A microcode patch in ucode_ptr is saved into mc_saved
237 * - if it has matching signature and newer revision compared to an existing
238 * patch mc_saved.
239 * - or if it is a newly discovered microcode patch.
240 *
241 * The microcode patch should have matching model with CPU.
242 *
243 * Returns: The updated number @num_saved of saved microcode patches.
244 */
245static unsigned int _save_mc(struct microcode_intel **mc_saved,
246 u8 *ucode_ptr, unsigned int num_saved)
247{
248 struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
249 unsigned int sig, pf;
250 int found = 0, i;
251
252 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
253
254 for (i = 0; i < num_saved; i++) {
255 mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
256 sig = mc_saved_hdr->sig;
257 pf = mc_saved_hdr->pf;
258
259 if (!find_matching_signature(ucode_ptr, sig, pf))
260 continue;
261
262 found = 1;
263
264 if (mc_hdr->rev <= mc_saved_hdr->rev)
265 continue;
266
267 /*
268 * Found an older ucode saved earlier. Replace it with
269 * this newer one.
270 */
271 mc_saved[i] = (struct microcode_intel *)ucode_ptr;
272 break;
273 }
274
275 /* Newly detected microcode, save it to memory. */
276 if (i >= num_saved && !found)
277 mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
278
279 return num_saved;
280}
281
282/*
283 * Get microcode matching with BSP's model. Only CPUs with the same model as
284 * BSP can stay in the platform.
285 */
286static enum ucode_state __init
287get_matching_model_microcode(int cpu, unsigned long start,
288 void *data, size_t size,
289 struct mc_saved_data *mc_saved_data,
290 unsigned long *mc_saved_in_initrd,
291 struct ucode_cpu_info *uci)
292{
293 u8 *ucode_ptr = data;
294 unsigned int leftover = size;
295 enum ucode_state state = UCODE_OK;
296 unsigned int mc_size;
297 struct microcode_header_intel *mc_header;
298 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
299 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
300 int i;
301
302 while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
303
304 if (leftover < sizeof(mc_header))
305 break;
306
307 mc_header = (struct microcode_header_intel *)ucode_ptr;
308
309 mc_size = get_totalsize(mc_header);
310 if (!mc_size || mc_size > leftover ||
311 microcode_sanity_check(ucode_ptr, 0) < 0)
312 break;
313
314 leftover -= mc_size;
315
316 /*
317 * Since APs with same family and model as the BSP may boot in
318 * the platform, we need to find and save microcode patches
319 * with the same family and model as the BSP.
320 */
321 if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
322 UCODE_OK) {
323 ucode_ptr += mc_size;
324 continue;
325 }
326
327 mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
328
329 ucode_ptr += mc_size;
330 }
331
332 if (leftover) {
333 state = UCODE_ERROR;
334 goto out;
335 }
336
337 if (mc_saved_count == 0) {
338 state = UCODE_NFOUND;
339 goto out;
340 }
341
342 for (i = 0; i < mc_saved_count; i++)
343 mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
344
345 mc_saved_data->mc_saved_count = mc_saved_count;
346out:
347 return state;
348}
349
350static int collect_cpu_info_early(struct ucode_cpu_info *uci)
351{
352 unsigned int val[2];
353 unsigned int family, model;
354 struct cpu_signature csig;
355 unsigned int eax, ebx, ecx, edx;
356
357 csig.sig = 0;
358 csig.pf = 0;
359 csig.rev = 0;
360
361 memset(uci, 0, sizeof(*uci));
362
363 eax = 0x00000001;
364 ecx = 0;
365 native_cpuid(&eax, &ebx, &ecx, &edx);
366 csig.sig = eax;
367
368 family = __x86_family(csig.sig);
369 model = x86_model(csig.sig);
370
371 if ((model >= 5) || (family > 6)) {
372 /* get processor flags from MSR 0x17 */
373 native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
374 csig.pf = 1 << ((val[1] >> 18) & 7);
375 }
376 native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
377
378 /* As documented in the SDM: Do a CPUID 1 here */
379 sync_core();
380
381 /* get the current revision from MSR 0x8B */
382 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
383
384 csig.rev = val[1];
385
386 uci->cpu_sig = csig;
387 uci->valid = 1;
388
389 return 0;
390}
391
392#ifdef DEBUG
393static void show_saved_mc(void)
394{
395 int i, j;
396 unsigned int sig, pf, rev, total_size, data_size, date;
397 struct ucode_cpu_info uci;
398
399 if (mc_saved_data.mc_saved_count == 0) {
400 pr_debug("no microcode data saved.\n");
401 return;
402 }
403 pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);
404
405 collect_cpu_info_early(&uci);
406
407 sig = uci.cpu_sig.sig;
408 pf = uci.cpu_sig.pf;
409 rev = uci.cpu_sig.rev;
410 pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
411
412 for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
413 struct microcode_header_intel *mc_saved_header;
414 struct extended_sigtable *ext_header;
415 int ext_sigcount;
416 struct extended_signature *ext_sig;
417
418 mc_saved_header = (struct microcode_header_intel *)
419 mc_saved_data.mc_saved[i];
420 sig = mc_saved_header->sig;
421 pf = mc_saved_header->pf;
422 rev = mc_saved_header->rev;
423 total_size = get_totalsize(mc_saved_header);
424 data_size = get_datasize(mc_saved_header);
425 date = mc_saved_header->date;
426
427		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
428 i, sig, pf, rev, total_size,
429 date & 0xffff,
430 date >> 24,
431 (date >> 16) & 0xff);
432
433 /* Look for ext. headers: */
434 if (total_size <= data_size + MC_HEADER_SIZE)
435 continue;
436
437 ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
438 ext_sigcount = ext_header->count;
439 ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
440
441 for (j = 0; j < ext_sigcount; j++) {
442 sig = ext_sig->sig;
443 pf = ext_sig->pf;
444
445 pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
446 j, sig, pf);
447
448 ext_sig++;
449 }
450
451 }
452}
453#else
454static inline void show_saved_mc(void)
455{
456}
457#endif
458
459#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
460static DEFINE_MUTEX(x86_cpu_microcode_mutex);
461/*
462 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
463 * hot added or resumes.
464 *
465 * Please make sure this mc is a valid microcode patch before calling
466 * this function.
467 */
468int save_mc_for_early(u8 *mc)
469{
470 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
471 unsigned int mc_saved_count_init;
472 unsigned int mc_saved_count;
473 struct microcode_intel **mc_saved;
474 int ret = 0;
475 int i;
476
477 /*
478 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
479 * hotplug.
480 */
481 mutex_lock(&x86_cpu_microcode_mutex);
482
483 mc_saved_count_init = mc_saved_data.mc_saved_count;
484 mc_saved_count = mc_saved_data.mc_saved_count;
485 mc_saved = mc_saved_data.mc_saved;
486
487 if (mc_saved && mc_saved_count)
488 memcpy(mc_saved_tmp, mc_saved,
489 mc_saved_count * sizeof(struct microcode_intel *));
490 /*
491 * Save the microcode patch mc in mc_save_tmp structure if it's a newer
492 * version.
493 */
494 mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);
495
496 /*
497 * Save the mc_save_tmp in global mc_saved_data.
498 */
499 ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
500 if (ret) {
501 pr_err("Cannot save microcode patch.\n");
502 goto out;
503 }
504
505 show_saved_mc();
506
507 /*
508 * Free old saved microcode data.
509 */
510 if (mc_saved) {
511 for (i = 0; i < mc_saved_count_init; i++)
512 kfree(mc_saved[i]);
513 kfree(mc_saved);
514 }
515
516out:
517 mutex_unlock(&x86_cpu_microcode_mutex);
518
519 return ret;
520}
521EXPORT_SYMBOL_GPL(save_mc_for_early);
522#endif
523
524static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
525{
526#ifdef CONFIG_X86_64
527 unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
528 unsigned int family, model, stepping;
529 char name[30];
530
531 native_cpuid(&eax, &ebx, &ecx, &edx);
532
533 family = __x86_family(eax);
534 model = x86_model(eax);
535 stepping = eax & 0xf;
536
537 sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
538
539 return get_builtin_firmware(cp, name);
540#else
541 return false;
542#endif
543}
544
545static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
546static __init enum ucode_state
547scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
548 unsigned long start, unsigned long size,
549 struct ucode_cpu_info *uci)
550{
551 struct cpio_data cd;
552 long offset = 0;
553#ifdef CONFIG_X86_32
554 char *p = (char *)__pa_nodebug(ucode_name);
555#else
556 char *p = ucode_name;
557#endif
558
559 cd.data = NULL;
560 cd.size = 0;
561
562 cd = find_cpio_data(p, (void *)start, size, &offset);
563 if (!cd.data) {
564 if (!load_builtin_intel_microcode(&cd))
565 return UCODE_ERROR;
566 }
567
568 return get_matching_model_microcode(0, start, cd.data, cd.size,
569 mc_saved_data, initrd, uci);
570}
571
572/*
573 * Print ucode update info.
574 */
575static void
576print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
577{
578 int cpu = smp_processor_id();
579
580 pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
581 cpu,
582 uci->cpu_sig.rev,
583 date & 0xffff,
584 date >> 24,
585 (date >> 16) & 0xff);
586}
587
588#ifdef CONFIG_X86_32
589
590static int delay_ucode_info;
591static int current_mc_date;
592
593/*
594 * Print early updated ucode info after printk works. This is delayed info dump.
595 */
596void show_ucode_info_early(void)
597{
598 struct ucode_cpu_info uci;
599
600 if (delay_ucode_info) {
601 collect_cpu_info_early(&uci);
602 print_ucode_info(&uci, current_mc_date);
603 delay_ucode_info = 0;
604 }
605}
606
607/*
608 * At this point, we can not call printk() yet. Keep microcode patch number in
609 * mc_saved_data.mc_saved and delay printing microcode info in
610 * show_ucode_info_early() until printk() works.
611 */
612static void print_ucode(struct ucode_cpu_info *uci)
613{
614 struct microcode_intel *mc_intel;
615 int *delay_ucode_info_p;
616 int *current_mc_date_p;
617
618 mc_intel = uci->mc;
619 if (mc_intel == NULL)
620 return;
621
622 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
623 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
624
625 *delay_ucode_info_p = 1;
626 *current_mc_date_p = mc_intel->hdr.date;
627}
628#else
629
630/*
631 * Flush global tlb. We only do this in x86_64 where paging has been enabled
632 * already and PGE should be enabled as well.
633 */
634static inline void flush_tlb_early(void)
635{
636 __native_flush_tlb_global_irq_disabled();
637}
638
639static inline void print_ucode(struct ucode_cpu_info *uci)
640{
641 struct microcode_intel *mc_intel;
642
643 mc_intel = uci->mc;
644 if (mc_intel == NULL)
645 return;
646
647 print_ucode_info(uci, mc_intel->hdr.date);
648}
649#endif
650
651static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
652{
653 struct microcode_intel *mc_intel;
654 unsigned int val[2];
655
656 mc_intel = uci->mc;
657 if (mc_intel == NULL)
658 return 0;
659
660 /* write microcode via MSR 0x79 */
661 native_wrmsr(MSR_IA32_UCODE_WRITE,
662 (unsigned long) mc_intel->bits,
663 (unsigned long) mc_intel->bits >> 16 >> 16);
664 native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
665
666 /* As documented in the SDM: Do a CPUID 1 here */
667 sync_core();
668
669 /* get the current revision from MSR 0x8B */
670 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
671 if (val[1] != mc_intel->hdr.rev)
672 return -1;
673
674#ifdef CONFIG_X86_64
675 /* Flush global tlb. This is precaution. */
676 flush_tlb_early();
677#endif
678 uci->cpu_sig.rev = val[1];
679
680 if (early)
681 print_ucode(uci);
682 else
683 print_ucode_info(uci, mc_intel->hdr.date);
684
685 return 0;
686}
687
688/*
689 * This function converts microcode patch offsets previously stored in
690 * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
691 */
692int __init save_microcode_in_initrd_intel(void)
693{
694 unsigned int count = mc_saved_data.mc_saved_count;
695 struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
696 int ret = 0;
697
698 if (count == 0)
699 return ret;
700
701 copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
702 ret = save_microcode(&mc_saved_data, mc_saved, count);
703 if (ret)
704 pr_err("Cannot save microcode patches from initrd.\n");
705
706 show_saved_mc();
707
708 return ret;
709}
710
711static void __init
712_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
713 unsigned long *initrd,
714 unsigned long start, unsigned long size)
715{
716 struct ucode_cpu_info uci;
717 enum ucode_state ret;
718
719 collect_cpu_info_early(&uci);
720
721 ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
722 if (ret != UCODE_OK)
723 return;
724
725 ret = load_microcode(mc_saved_data, initrd, start, &uci);
726 if (ret != UCODE_OK)
727 return;
728
729 apply_microcode_early(&uci, true);
730}
731
732void __init load_ucode_intel_bsp(void)
733{
734 u64 start, size;
735#ifdef CONFIG_X86_32
736 struct boot_params *p;
737
738 p = (struct boot_params *)__pa_nodebug(&boot_params);
739 start = p->hdr.ramdisk_image;
740 size = p->hdr.ramdisk_size;
741
742 _load_ucode_intel_bsp(
743 (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
744 (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
745 start, size);
746#else
747 start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
748 size = boot_params.hdr.ramdisk_size;
749
750 _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
751#endif
752}
753
754void load_ucode_intel_ap(void)
755{
756 struct mc_saved_data *mc_saved_data_p;
757 struct ucode_cpu_info uci;
758 unsigned long *mc_saved_in_initrd_p;
759 unsigned long initrd_start_addr;
760 enum ucode_state ret;
761#ifdef CONFIG_X86_32
762 unsigned long *initrd_start_p;
763
764 mc_saved_in_initrd_p =
765 (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
766 mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
767 initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
768 initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
769#else
770 mc_saved_data_p = &mc_saved_data;
771 mc_saved_in_initrd_p = mc_saved_in_initrd;
772 initrd_start_addr = initrd_start;
773#endif
774
775 /*
776 * If there is no valid ucode previously saved in memory, no need to
777 * update ucode on this AP.
778 */
779 if (mc_saved_data_p->mc_saved_count == 0)
780 return;
781
782 collect_cpu_info_early(&uci);
783 ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
784 initrd_start_addr, &uci);
785
786 if (ret != UCODE_OK)
787 return;
788
789 apply_microcode_early(&uci, true);
790}
791
792void reload_ucode_intel(void)
793{
794 struct ucode_cpu_info uci;
795 enum ucode_state ret;
796
797 if (!mc_saved_data.mc_saved_count)
798 return;
799
800 collect_cpu_info_early(&uci);
801
802 ret = load_microcode_early(mc_saved_data.mc_saved,
803 mc_saved_data.mc_saved_count, &uci);
804 if (ret != UCODE_OK)
805 return;
806
807 apply_microcode_early(&uci, false);
808}
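
_save_mc(), removed here and re-added verbatim in intel.c, implements a simple cache policy: a patch whose signature and processor flags match an existing entry replaces it only when its revision is higher, while an unseen signature is appended. Here is a compact sketch of that policy; signature matching is reduced to plain equality, whereas the kernel goes through find_matching_signature().

#include <stdio.h>

struct mc_hdr { unsigned int sig, pf, rev; };

/*
 * Same shape as _save_mc(): replace an existing entry with a matching
 * (sig, pf) only if the new revision is higher, otherwise append the
 * patch as a new entry. Returns the updated count.
 */
static unsigned int save_mc(struct mc_hdr **saved, struct mc_hdr *mc,
			    unsigned int num)
{
	unsigned int i;
	int found = 0;

	for (i = 0; i < num; i++) {
		if (saved[i]->sig != mc->sig || saved[i]->pf != mc->pf)
			continue;
		found = 1;
		if (mc->rev <= saved[i]->rev)
			continue;
		saved[i] = mc;		/* newer revision of a known patch */
		break;
	}

	if (i >= num && !found)
		saved[num++] = mc;	/* previously unseen signature */

	return num;
}

int main(void)
{
	struct mc_hdr a = { 0x306c3, 0x2, 0x10 }, b = { 0x306c3, 0x2, 0x1f };
	struct mc_hdr *cache[8];
	unsigned int n = 0;

	n = save_mc(cache, &a, n);
	n = save_mc(cache, &b, n);
	printf("count=%u rev=0x%x\n", n, cache[0]->rev);	/* count=1 rev=0x1f */
	return 0;
}
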
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
index 1883d252ff7d..b96896bcbdaf 100644
--- a/arch/x86/kernel/cpu/microcode/intel_lib.c
+++ b/arch/x86/kernel/cpu/microcode/intel_lib.c
@@ -25,7 +25,6 @@
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h>
29 28
30#include <asm/microcode_intel.h> 29#include <asm/microcode_intel.h>
31#include <asm/processor.h> 30#include <asm/processor.h>
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 66dd3fe99b82..4562cf070c27 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1175,7 +1175,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
1175 * skip the schedulability test here, it will be performed 1175 * skip the schedulability test here, it will be performed
1176 * at commit time (->commit_txn) as a whole. 1176 * at commit time (->commit_txn) as a whole.
1177 */ 1177 */
1178 if (cpuc->group_flag & PERF_EVENT_TXN) 1178 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1179 goto done_collect; 1179 goto done_collect;
1180 1180
1181 ret = x86_pmu.schedule_events(cpuc, n, assign); 1181 ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1326,7 +1326,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
1326 * XXX assumes any ->del() called during a TXN will only be on 1326 * XXX assumes any ->del() called during a TXN will only be on
1327 * an event added during that same TXN. 1327 * an event added during that same TXN.
1328 */ 1328 */
1329 if (cpuc->group_flag & PERF_EVENT_TXN) 1329 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1330 return; 1330 return;
1331 1331
1332 /* 1332 /*
@@ -1748,11 +1748,22 @@ static inline void x86_pmu_read(struct perf_event *event)
1748 * Start group events scheduling transaction 1748 * Start group events scheduling transaction
1749 * Set the flag to make pmu::enable() not perform the 1749 * Set the flag to make pmu::enable() not perform the
1750 * schedulability test, it will be performed at commit time 1750 * schedulability test, it will be performed at commit time
1751 *
1752 * We only support PERF_PMU_TXN_ADD transactions. Save the
1753 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1754 * transactions.
1751 */ 1755 */
1752static void x86_pmu_start_txn(struct pmu *pmu) 1756static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
1753{ 1757{
1758 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1759
1760 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */
1761
1762 cpuc->txn_flags = txn_flags;
1763 if (txn_flags & ~PERF_PMU_TXN_ADD)
1764 return;
1765
1754 perf_pmu_disable(pmu); 1766 perf_pmu_disable(pmu);
1755 __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1756 __this_cpu_write(cpu_hw_events.n_txn, 0); 1767 __this_cpu_write(cpu_hw_events.n_txn, 0);
1757} 1768}
1758 1769
@@ -1763,7 +1774,16 @@ static void x86_pmu_start_txn(struct pmu *pmu)
1763 */ 1774 */
1764static void x86_pmu_cancel_txn(struct pmu *pmu) 1775static void x86_pmu_cancel_txn(struct pmu *pmu)
1765{ 1776{
1766 __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN); 1777 unsigned int txn_flags;
1778 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1779
1780 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1781
1782 txn_flags = cpuc->txn_flags;
1783 cpuc->txn_flags = 0;
1784 if (txn_flags & ~PERF_PMU_TXN_ADD)
1785 return;
1786
1767 /* 1787 /*
1768 * Truncate collected array by the number of events added in this 1788 * Truncate collected array by the number of events added in this
1769 * transaction. See x86_pmu_add() and x86_pmu_*_txn(). 1789 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
@@ -1786,6 +1806,13 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
1786 int assign[X86_PMC_IDX_MAX]; 1806 int assign[X86_PMC_IDX_MAX];
1787 int n, ret; 1807 int n, ret;
1788 1808
1809 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1810
1811 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1812 cpuc->txn_flags = 0;
1813 return 0;
1814 }
1815
1789 n = cpuc->n_events; 1816 n = cpuc->n_events;
1790 1817
1791 if (!x86_pmu_initialized()) 1818 if (!x86_pmu_initialized())
@@ -1801,7 +1828,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
1801 */ 1828 */
1802 memcpy(cpuc->assign, assign, n*sizeof(int)); 1829 memcpy(cpuc->assign, assign, n*sizeof(int));
1803 1830
1804 cpuc->group_flag &= ~PERF_EVENT_TXN; 1831 cpuc->txn_flags = 0;
1805 perf_pmu_enable(pmu); 1832 perf_pmu_enable(pmu);
1806 return 0; 1833 return 0;
1807} 1834}
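
For reference, the hunks above replace the old group_flag handshake with a transaction-type handshake: the PMU records the flags passed to ->start_txn() and only performs the event-scheduling work for PERF_PMU_TXN_ADD transactions. A minimal, self-contained user-space toy of that protocol follows (the flag values, struct and assertions are illustrative stand-ins, not the kernel definitions):

#include <assert.h>
#include <stdio.h>

#define TXN_ADD  0x1	/* stand-in for PERF_PMU_TXN_ADD */
#define TXN_READ 0x2	/* stand-in for PERF_PMU_TXN_READ */

struct toy_pmu {
	unsigned int txn_flags;
	int n_txn;		/* events collected in this transaction */
};

static void start_txn(struct toy_pmu *pmu, unsigned int flags)
{
	assert(!pmu->txn_flags);	/* no txn already in flight */
	pmu->txn_flags = flags;
	if (flags & ~TXN_ADD)
		return;			/* nothing to prepare for non-ADD txns */
	pmu->n_txn = 0;
}

static int commit_txn(struct toy_pmu *pmu)
{
	assert(pmu->txn_flags);		/* a txn must be in flight */
	if (pmu->txn_flags & ~TXN_ADD) {
		pmu->txn_flags = 0;
		return 0;		/* non-ADD txns always succeed */
	}
	/* ...the schedulability test would run here for ADD txns... */
	pmu->txn_flags = 0;
	return 0;
}

int main(void)
{
	struct toy_pmu pmu = { 0, 0 };

	start_txn(&pmu, TXN_ADD);
	printf("ADD txn committed: %d\n", commit_txn(&pmu));

	start_txn(&pmu, TXN_READ);
	printf("READ txn committed: %d\n", commit_txn(&pmu));
	return 0;
}
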
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 165be83a7fa4..499f533dd3cc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -196,7 +196,7 @@ struct cpu_hw_events {
196 196
197 int n_excl; /* the number of exclusive events */ 197 int n_excl; /* the number of exclusive events */
198 198
199 unsigned int group_flag; 199 unsigned int txn_flags;
200 int is_fake; 200 int is_fake;
201 201
202 /* 202 /*
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
index d1c0f254afbe..2cad71d1b14c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -495,6 +495,19 @@ static int bts_event_init(struct perf_event *event)
495 if (x86_add_exclusive(x86_lbr_exclusive_bts)) 495 if (x86_add_exclusive(x86_lbr_exclusive_bts))
496 return -EBUSY; 496 return -EBUSY;
497 497
498 /*
499 * BTS leaks kernel addresses even when CPL0 tracing is
500 * disabled, so disallow intel_bts driver for unprivileged
501 * users on paranoid systems since it provides trace data
502 * to the user in a zero-copy fashion.
503 *
504 * Note that the default paranoia setting permits unprivileged
505 * users to profile the kernel.
506 */
507 if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
508 !capable(CAP_SYS_ADMIN))
509 return -EACCES;
510
498 ret = x86_reserve_hardware(); 511 ret = x86_reserve_hardware();
499 if (ret) { 512 if (ret) {
500 x86_del_exclusive(x86_lbr_exclusive_bts); 513 x86_del_exclusive(x86_lbr_exclusive_bts);
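
The new -EACCES check above hinges on perf_paranoid_kernel(), i.e. the perf_event_paranoid sysctl being greater than 1. A small user-space sketch (assuming the usual /proc/sys/kernel/perf_event_paranoid location) that reports whether an unprivileged intel_bts user would be refused by this check:

#include <stdio.h>

int main(void)
{
	int paranoid;
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");

	if (!f || fscanf(f, "%d", &paranoid) != 1) {
		fprintf(stderr, "cannot read perf_event_paranoid\n");
		return 1;
	}
	fclose(f);

	/* perf_paranoid_kernel() is "sysctl_perf_event_paranoid > 1" */
	if (paranoid > 1)
		printf("paranoid=%d: unprivileged BTS tracing is refused (-EACCES)\n",
		       paranoid);
	else
		printf("paranoid=%d: unprivileged kernel profiling, and thus BTS, is permitted\n",
		       paranoid);
	return 0;
}
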
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cstate.c b/arch/x86/kernel/cpu/perf_event_intel_cstate.c
new file mode 100644
index 000000000000..75a38b5a2e26
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_cstate.c
@@ -0,0 +1,694 @@
1/*
2 * perf_event_intel_cstate.c: support cstate residency counters
3 *
4 * Copyright (C) 2015, Intel Corp.
5 * Author: Kan Liang (kan.liang@intel.com)
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Library General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Library General Public License for more details.
16 *
17 */
18
19/*
20 * This file exports cstate-related free-running (read-only) counters
21 * for perf. These counters may be used simultaneously by other tools,
22 * such as turbostat. However, it still makes sense to expose them
23 * through perf: they can conveniently be collected together with other
24 * events, and tools can use them without any special MSR access code
25 * of their own.
26 *
27 * The events only support system-wide counting. There is no sampling
28 * support because the hardware does not provide it.
29 *
30 * According to the counters' scope and category, two PMUs are
31 * registered with the perf_event core subsystem:
32 * - 'cstate_core': The counter is available for each physical core.
33 * The counters include CORE_C*_RESIDENCY.
34 * - 'cstate_pkg': The counter is available for each physical package.
35 * The counters include PKG_C*_RESIDENCY.
36 *
37 * All of these counters are specified in the Intel® 64 and IA-32
38 * Architectures Software Developer's Manual, Volume 3B.
39 *
40 * Model specific counters:
41 * MSR_CORE_C1_RES: CORE C1 Residency Counter
42 * perf code: 0x00
43 * Available model: SLM,AMT
44 * Scope: Core (each processor core has a MSR)
45 * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
46 * perf code: 0x01
47 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
48 * Scope: Core
49 * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
50 * perf code: 0x02
51 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
52 * Scope: Core
53 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
54 * perf code: 0x03
55 * Available model: SNB,IVB,HSW,BDW,SKL
56 * Scope: Core
57 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
58 * perf code: 0x00
59 * Available model: SNB,IVB,HSW,BDW,SKL
60 * Scope: Package (physical package)
61 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
62 * perf code: 0x01
63 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
64 * Scope: Package (physical package)
65 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
66 * perf code: 0x02
67 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
68 * Scope: Package (physical package)
69 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
70 * perf code: 0x03
71 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
72 * Scope: Package (physical package)
73 * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
74 * perf code: 0x04
75 * Available model: HSW ULT only
76 * Scope: Package (physical package)
77 * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
78 * perf code: 0x05
79 * Available model: HSW ULT only
80 * Scope: Package (physical package)
81 * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
82 * perf code: 0x06
83 * Available model: HSW ULT only
84 * Scope: Package (physical package)
85 *
86 */
87
88#include <linux/module.h>
89#include <linux/slab.h>
90#include <linux/perf_event.h>
91#include <asm/cpu_device_id.h>
92#include "perf_event.h"
93
94#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
95static ssize_t __cstate_##_var##_show(struct kobject *kobj, \
96 struct kobj_attribute *attr, \
97 char *page) \
98{ \
99 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
100 return sprintf(page, _format "\n"); \
101} \
102static struct kobj_attribute format_attr_##_var = \
103 __ATTR(_name, 0444, __cstate_##_var##_show, NULL)
104
105static ssize_t cstate_get_attr_cpumask(struct device *dev,
106 struct device_attribute *attr,
107 char *buf);
108
109struct perf_cstate_msr {
110 u64 msr;
111 struct perf_pmu_events_attr *attr;
112 bool (*test)(int idx);
113};
114
115
116/* cstate_core PMU */
117
118static struct pmu cstate_core_pmu;
119static bool has_cstate_core;
120
121enum perf_cstate_core_id {
122 /*
123 * cstate_core events
124 */
125 PERF_CSTATE_CORE_C1_RES = 0,
126 PERF_CSTATE_CORE_C3_RES,
127 PERF_CSTATE_CORE_C6_RES,
128 PERF_CSTATE_CORE_C7_RES,
129
130 PERF_CSTATE_CORE_EVENT_MAX,
131};
132
133bool test_core(int idx)
134{
135 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
136 boot_cpu_data.x86 != 6)
137 return false;
138
139 switch (boot_cpu_data.x86_model) {
140 case 30: /* 45nm Nehalem */
141 case 26: /* 45nm Nehalem-EP */
142 case 46: /* 45nm Nehalem-EX */
143
144 case 37: /* 32nm Westmere */
145 case 44: /* 32nm Westmere-EP */
146 case 47: /* 32nm Westmere-EX */
147 if (idx == PERF_CSTATE_CORE_C3_RES ||
148 idx == PERF_CSTATE_CORE_C6_RES)
149 return true;
150 break;
151 case 42: /* 32nm SandyBridge */
152 case 45: /* 32nm SandyBridge-E/EN/EP */
153
154 case 58: /* 22nm IvyBridge */
155 case 62: /* 22nm IvyBridge-EP/EX */
156
157 case 60: /* 22nm Haswell Core */
158 case 63: /* 22nm Haswell Server */
159 case 69: /* 22nm Haswell ULT */
160 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
161
162 case 61: /* 14nm Broadwell Core-M */
163 case 86: /* 14nm Broadwell Xeon D */
164 case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
165 case 79: /* 14nm Broadwell Server */
166
167 case 78: /* 14nm Skylake Mobile */
168 case 94: /* 14nm Skylake Desktop */
169 if (idx == PERF_CSTATE_CORE_C3_RES ||
170 idx == PERF_CSTATE_CORE_C6_RES ||
171 idx == PERF_CSTATE_CORE_C7_RES)
172 return true;
173 break;
174 case 55: /* 22nm Atom "Silvermont" */
175 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
176 case 76: /* 14nm Atom "Airmont" */
177 if (idx == PERF_CSTATE_CORE_C1_RES ||
178 idx == PERF_CSTATE_CORE_C6_RES)
179 return true;
180 break;
181 }
182
183 return false;
184}
185
186PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
187PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
188PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
189PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
190
191static struct perf_cstate_msr core_msr[] = {
192 [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES, &evattr_cstate_core_c1, test_core, },
193 [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY, &evattr_cstate_core_c3, test_core, },
194 [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY, &evattr_cstate_core_c6, test_core, },
195 [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY, &evattr_cstate_core_c7, test_core, },
196};
197
198static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
199 NULL,
200};
201
202static struct attribute_group core_events_attr_group = {
203 .name = "events",
204 .attrs = core_events_attrs,
205};
206
207DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
208static struct attribute *core_format_attrs[] = {
209 &format_attr_core_event.attr,
210 NULL,
211};
212
213static struct attribute_group core_format_attr_group = {
214 .name = "format",
215 .attrs = core_format_attrs,
216};
217
218static cpumask_t cstate_core_cpu_mask;
219static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
220
221static struct attribute *cstate_cpumask_attrs[] = {
222 &dev_attr_cpumask.attr,
223 NULL,
224};
225
226static struct attribute_group cpumask_attr_group = {
227 .attrs = cstate_cpumask_attrs,
228};
229
230static const struct attribute_group *core_attr_groups[] = {
231 &core_events_attr_group,
232 &core_format_attr_group,
233 &cpumask_attr_group,
234 NULL,
235};
236
237/* cstate_core PMU end */
238
239
240/* cstate_pkg PMU */
241
242static struct pmu cstate_pkg_pmu;
243static bool has_cstate_pkg;
244
245enum perf_cstate_pkg_id {
246 /*
247 * cstate_pkg events
248 */
249 PERF_CSTATE_PKG_C2_RES = 0,
250 PERF_CSTATE_PKG_C3_RES,
251 PERF_CSTATE_PKG_C6_RES,
252 PERF_CSTATE_PKG_C7_RES,
253 PERF_CSTATE_PKG_C8_RES,
254 PERF_CSTATE_PKG_C9_RES,
255 PERF_CSTATE_PKG_C10_RES,
256
257 PERF_CSTATE_PKG_EVENT_MAX,
258};
259
260bool test_pkg(int idx)
261{
262 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
263 boot_cpu_data.x86 != 6)
264 return false;
265
266 switch (boot_cpu_data.x86_model) {
267 case 30: /* 45nm Nehalem */
268 case 26: /* 45nm Nehalem-EP */
269 case 46: /* 45nm Nehalem-EX */
270
271 case 37: /* 32nm Westmere */
272 case 44: /* 32nm Westmere-EP */
273 case 47: /* 32nm Westmere-EX */
274 if (idx == PERF_CSTATE_CORE_C3_RES ||
275 idx == PERF_CSTATE_CORE_C6_RES ||
276 idx == PERF_CSTATE_CORE_C7_RES)
277 return true;
278 break;
279 case 42: /* 32nm SandyBridge */
280 case 45: /* 32nm SandyBridge-E/EN/EP */
281
282 case 58: /* 22nm IvyBridge */
283 case 62: /* 22nm IvyBridge-EP/EX */
284
285 case 60: /* 22nm Haswell Core */
286 case 63: /* 22nm Haswell Server */
287 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
288
289 case 61: /* 14nm Broadwell Core-M */
290 case 86: /* 14nm Broadwell Xeon D */
291 case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
292 case 79: /* 14nm Broadwell Server */
293
294 case 78: /* 14nm Skylake Mobile */
295 case 94: /* 14nm Skylake Desktop */
296 if (idx == PERF_CSTATE_PKG_C2_RES ||
297 idx == PERF_CSTATE_PKG_C3_RES ||
298 idx == PERF_CSTATE_PKG_C6_RES ||
299 idx == PERF_CSTATE_PKG_C7_RES)
300 return true;
301 break;
302 case 55: /* 22nm Atom "Silvermont" */
303 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
304 case 76: /* 14nm Atom "Airmont" */
305 if (idx == PERF_CSTATE_CORE_C6_RES)
306 return true;
307 break;
308 case 69: /* 22nm Haswell ULT */
309 if (idx == PERF_CSTATE_PKG_C2_RES ||
310 idx == PERF_CSTATE_PKG_C3_RES ||
311 idx == PERF_CSTATE_PKG_C6_RES ||
312 idx == PERF_CSTATE_PKG_C7_RES ||
313 idx == PERF_CSTATE_PKG_C8_RES ||
314 idx == PERF_CSTATE_PKG_C9_RES ||
315 idx == PERF_CSTATE_PKG_C10_RES)
316 return true;
317 break;
318 }
319
320 return false;
321}
322
323PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
324PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
325PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
326PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
327PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
328PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
329PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
330
331static struct perf_cstate_msr pkg_msr[] = {
332 [PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY, &evattr_cstate_pkg_c2, test_pkg, },
333 [PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY, &evattr_cstate_pkg_c3, test_pkg, },
334 [PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY, &evattr_cstate_pkg_c6, test_pkg, },
335 [PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY, &evattr_cstate_pkg_c7, test_pkg, },
336 [PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY, &evattr_cstate_pkg_c8, test_pkg, },
337 [PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY, &evattr_cstate_pkg_c9, test_pkg, },
338 [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &evattr_cstate_pkg_c10, test_pkg, },
339};
340
341static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
342 NULL,
343};
344
345static struct attribute_group pkg_events_attr_group = {
346 .name = "events",
347 .attrs = pkg_events_attrs,
348};
349
350DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
351static struct attribute *pkg_format_attrs[] = {
352 &format_attr_pkg_event.attr,
353 NULL,
354};
355static struct attribute_group pkg_format_attr_group = {
356 .name = "format",
357 .attrs = pkg_format_attrs,
358};
359
360static cpumask_t cstate_pkg_cpu_mask;
361
362static const struct attribute_group *pkg_attr_groups[] = {
363 &pkg_events_attr_group,
364 &pkg_format_attr_group,
365 &cpumask_attr_group,
366 NULL,
367};
368
369/* cstate_pkg PMU end */
370
371static ssize_t cstate_get_attr_cpumask(struct device *dev,
372 struct device_attribute *attr,
373 char *buf)
374{
375 struct pmu *pmu = dev_get_drvdata(dev);
376
377 if (pmu == &cstate_core_pmu)
378 return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
379 else if (pmu == &cstate_pkg_pmu)
380 return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
381 else
382 return 0;
383}
384
385static int cstate_pmu_event_init(struct perf_event *event)
386{
387 u64 cfg = event->attr.config;
388 int ret = 0;
389
390 if (event->attr.type != event->pmu->type)
391 return -ENOENT;
392
393 /* unsupported modes and filters */
394 if (event->attr.exclude_user ||
395 event->attr.exclude_kernel ||
396 event->attr.exclude_hv ||
397 event->attr.exclude_idle ||
398 event->attr.exclude_host ||
399 event->attr.exclude_guest ||
400 event->attr.sample_period) /* no sampling */
401 return -EINVAL;
402
403 if (event->pmu == &cstate_core_pmu) {
404 if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
405 return -EINVAL;
406 if (!core_msr[cfg].attr)
407 return -EINVAL;
408 event->hw.event_base = core_msr[cfg].msr;
409 } else if (event->pmu == &cstate_pkg_pmu) {
410 if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
411 return -EINVAL;
412 if (!pkg_msr[cfg].attr)
413 return -EINVAL;
414 event->hw.event_base = pkg_msr[cfg].msr;
415 } else
416 return -ENOENT;
417
418 /* must be done before validate_group */
419 event->hw.config = cfg;
420 event->hw.idx = -1;
421
422 return ret;
423}
424
425static inline u64 cstate_pmu_read_counter(struct perf_event *event)
426{
427 u64 val;
428
429 rdmsrl(event->hw.event_base, val);
430 return val;
431}
432
433static void cstate_pmu_event_update(struct perf_event *event)
434{
435 struct hw_perf_event *hwc = &event->hw;
436 u64 prev_raw_count, new_raw_count;
437
438again:
439 prev_raw_count = local64_read(&hwc->prev_count);
440 new_raw_count = cstate_pmu_read_counter(event);
441
442 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
443 new_raw_count) != prev_raw_count)
444 goto again;
445
446 local64_add(new_raw_count - prev_raw_count, &event->count);
447}
448
449static void cstate_pmu_event_start(struct perf_event *event, int mode)
450{
451 local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
452}
453
454static void cstate_pmu_event_stop(struct perf_event *event, int mode)
455{
456 cstate_pmu_event_update(event);
457}
458
459static void cstate_pmu_event_del(struct perf_event *event, int mode)
460{
461 cstate_pmu_event_stop(event, PERF_EF_UPDATE);
462}
463
464static int cstate_pmu_event_add(struct perf_event *event, int mode)
465{
466 if (mode & PERF_EF_START)
467 cstate_pmu_event_start(event, mode);
468
469 return 0;
470}
471
472static void cstate_cpu_exit(int cpu)
473{
474 int i, id, target;
475
476 /* cpu exit for cstate core */
477 if (has_cstate_core) {
478 id = topology_core_id(cpu);
479 target = -1;
480
481 for_each_online_cpu(i) {
482 if (i == cpu)
483 continue;
484 if (id == topology_core_id(i)) {
485 target = i;
486 break;
487 }
488 }
489 if (cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask) && target >= 0)
490 cpumask_set_cpu(target, &cstate_core_cpu_mask);
491 WARN_ON(cpumask_empty(&cstate_core_cpu_mask));
492 if (target >= 0)
493 perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
494 }
495
496 /* cpu exit for cstate pkg */
497 if (has_cstate_pkg) {
498 id = topology_physical_package_id(cpu);
499 target = -1;
500
501 for_each_online_cpu(i) {
502 if (i == cpu)
503 continue;
504 if (id == topology_physical_package_id(i)) {
505 target = i;
506 break;
507 }
508 }
509 if (cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask) && target >= 0)
510 cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
511 WARN_ON(cpumask_empty(&cstate_pkg_cpu_mask));
512 if (target >= 0)
513 perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
514 }
515}
516
517static void cstate_cpu_init(int cpu)
518{
519 int i, id;
520
521 /* cpu init for cstate core */
522 if (has_cstate_core) {
523 id = topology_core_id(cpu);
524 for_each_cpu(i, &cstate_core_cpu_mask) {
525 if (id == topology_core_id(i))
526 break;
527 }
528 if (i >= nr_cpu_ids)
529 cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
530 }
531
532 /* cpu init for cstate pkg */
533 if (has_cstate_pkg) {
534 id = topology_physical_package_id(cpu);
535 for_each_cpu(i, &cstate_pkg_cpu_mask) {
536 if (id == topology_physical_package_id(i))
537 break;
538 }
539 if (i >= nr_cpu_ids)
540 cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
541 }
542}
543
544static int cstate_cpu_notifier(struct notifier_block *self,
545 unsigned long action, void *hcpu)
546{
547 unsigned int cpu = (long)hcpu;
548
549 switch (action & ~CPU_TASKS_FROZEN) {
550 case CPU_UP_PREPARE:
551 break;
552 case CPU_STARTING:
553 cstate_cpu_init(cpu);
554 break;
555 case CPU_UP_CANCELED:
556 case CPU_DYING:
557 break;
558 case CPU_ONLINE:
559 case CPU_DEAD:
560 break;
561 case CPU_DOWN_PREPARE:
562 cstate_cpu_exit(cpu);
563 break;
564 default:
565 break;
566 }
567
568 return NOTIFY_OK;
569}
570
571/*
572 * Probe the cstate events and insert the available ones into the sysfs attrs.
573 * Return false if there are no available events.
574 */
575static bool cstate_probe_msr(struct perf_cstate_msr *msr,
576 struct attribute **events_attrs,
577 int max_event_nr)
578{
579 int i, j = 0;
580 u64 val;
581
582 /* Probe the cstate events. */
583 for (i = 0; i < max_event_nr; i++) {
584 if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
585 msr[i].attr = NULL;
586 }
587
588 /* List remaining events in the sysfs attrs. */
589 for (i = 0; i < max_event_nr; i++) {
590 if (msr[i].attr)
591 events_attrs[j++] = &msr[i].attr->attr.attr;
592 }
593 events_attrs[j] = NULL;
594
595 return (j > 0) ? true : false;
596}
597
598static int __init cstate_init(void)
599{
600 /* SLM has a different MSR for PKG C6 */
601 switch (boot_cpu_data.x86_model) {
602 case 55:
603 case 76:
604 case 77:
605 pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
606 }
607
608 if (cstate_probe_msr(core_msr, core_events_attrs, PERF_CSTATE_CORE_EVENT_MAX))
609 has_cstate_core = true;
610
611 if (cstate_probe_msr(pkg_msr, pkg_events_attrs, PERF_CSTATE_PKG_EVENT_MAX))
612 has_cstate_pkg = true;
613
614 return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
615}
616
617static void __init cstate_cpumask_init(void)
618{
619 int cpu;
620
621 cpu_notifier_register_begin();
622
623 for_each_online_cpu(cpu)
624 cstate_cpu_init(cpu);
625
626 __perf_cpu_notifier(cstate_cpu_notifier);
627
628 cpu_notifier_register_done();
629}
630
631static struct pmu cstate_core_pmu = {
632 .attr_groups = core_attr_groups,
633 .name = "cstate_core",
634 .task_ctx_nr = perf_invalid_context,
635 .event_init = cstate_pmu_event_init,
636 .add = cstate_pmu_event_add, /* must have */
637 .del = cstate_pmu_event_del, /* must have */
638 .start = cstate_pmu_event_start,
639 .stop = cstate_pmu_event_stop,
640 .read = cstate_pmu_event_update,
641 .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
642};
643
644static struct pmu cstate_pkg_pmu = {
645 .attr_groups = pkg_attr_groups,
646 .name = "cstate_pkg",
647 .task_ctx_nr = perf_invalid_context,
648 .event_init = cstate_pmu_event_init,
649 .add = cstate_pmu_event_add, /* must have */
650 .del = cstate_pmu_event_del, /* must have */
651 .start = cstate_pmu_event_start,
652 .stop = cstate_pmu_event_stop,
653 .read = cstate_pmu_event_update,
654 .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
655};
656
657static void __init cstate_pmus_register(void)
658{
659 int err;
660
661 if (has_cstate_core) {
662 err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
663 if (WARN_ON(err))
664 pr_info("Failed to register PMU %s error %d\n",
665 cstate_core_pmu.name, err);
666 }
667
668 if (has_cstate_pkg) {
669 err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
670 if (WARN_ON(err))
671 pr_info("Failed to register PMU %s error %d\n",
672 cstate_pkg_pmu.name, err);
673 }
674}
675
676static int __init cstate_pmu_init(void)
677{
678 int err;
679
680 if (cpu_has_hypervisor)
681 return -ENODEV;
682
683 err = cstate_init();
684 if (err)
685 return err;
686
687 cstate_cpumask_init();
688
689 cstate_pmus_register();
690
691 return 0;
692}
693
694device_initcall(cstate_pmu_init);
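
Once the cstate_core PMU above is registered, its counters can be read from user space like any other dynamic PMU. A minimal sketch follows; it assumes the standard sysfs layout under /sys/bus/event_source/devices/, the event encoding shown in the attrs above (c6-residency is event=0x02), and enough privilege for system-wide counting:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int type, fd;
	FILE *f;

	/* Dynamic PMU type id exported by the registered PMU. */
	f = fopen("/sys/bus/event_source/devices/cstate_core/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = type;
	attr.config = 0x02;	/* c6-residency, per the event attrs above */

	/* System-wide counting on CPU 0; needs sufficient privileges. */
	fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("core C6 residency accumulated over ~1s: %llu\n", count);
	close(fd);
	return 0;
}
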
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 84f236ab96b0..5db1c7755548 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -510,10 +510,11 @@ int intel_pmu_drain_bts_buffer(void)
510 u64 flags; 510 u64 flags;
511 }; 511 };
512 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; 512 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
513 struct bts_record *at, *top; 513 struct bts_record *at, *base, *top;
514 struct perf_output_handle handle; 514 struct perf_output_handle handle;
515 struct perf_event_header header; 515 struct perf_event_header header;
516 struct perf_sample_data data; 516 struct perf_sample_data data;
517 unsigned long skip = 0;
517 struct pt_regs regs; 518 struct pt_regs regs;
518 519
519 if (!event) 520 if (!event)
@@ -522,10 +523,10 @@ int intel_pmu_drain_bts_buffer(void)
522 if (!x86_pmu.bts_active) 523 if (!x86_pmu.bts_active)
523 return 0; 524 return 0;
524 525
525 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; 526 base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
526 top = (struct bts_record *)(unsigned long)ds->bts_index; 527 top = (struct bts_record *)(unsigned long)ds->bts_index;
527 528
528 if (top <= at) 529 if (top <= base)
529 return 0; 530 return 0;
530 531
531 memset(&regs, 0, sizeof(regs)); 532 memset(&regs, 0, sizeof(regs));
@@ -535,16 +536,43 @@ int intel_pmu_drain_bts_buffer(void)
535 perf_sample_data_init(&data, 0, event->hw.last_period); 536 perf_sample_data_init(&data, 0, event->hw.last_period);
536 537
537 /* 538 /*
539 * BTS leaks kernel addresses in branches across the cpl boundary,
540 * such as traps or system calls, so unless the user is asking for
541 * kernel tracing (and right now it's not possible), we'd need to
542 * filter them out. But first we need to count how many of those we
543 * have in the current batch. This is an extra O(n) pass; however,
544 * it is much cheaper than the output pass below, especially considering
545 * that n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see
546 * alloc_bts_buffer()).
547 */
548 for (at = base; at < top; at++) {
549 /*
550 * Note that right now *this* BTS code only works if
551 * attr::exclude_kernel is set, but let's keep this extra
552 * check here in case that changes.
553 */
554 if (event->attr.exclude_kernel &&
555 (kernel_ip(at->from) || kernel_ip(at->to)))
556 skip++;
557 }
558
559 /*
538 * Prepare a generic sample, i.e. fill in the invariant fields. 560 * Prepare a generic sample, i.e. fill in the invariant fields.
539 * We will overwrite the from and to address before we output 561 * We will overwrite the from and to address before we output
540 * the sample. 562 * the sample.
541 */ 563 */
542 perf_prepare_sample(&header, &data, event, &regs); 564 perf_prepare_sample(&header, &data, event, &regs);
543 565
544 if (perf_output_begin(&handle, event, header.size * (top - at))) 566 if (perf_output_begin(&handle, event, header.size *
567 (top - base - skip)))
545 return 1; 568 return 1;
546 569
547 for (; at < top; at++) { 570 for (at = base; at < top; at++) {
571 /* Filter out any records that contain kernel addresses. */
572 if (event->attr.exclude_kernel &&
573 (kernel_ip(at->from) || kernel_ip(at->to)))
574 continue;
575
548 data.ip = at->from; 576 data.ip = at->from;
549 data.addr = at->to; 577 data.addr = at->to;
550 578
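
The rework above sizes the perf output by counting the records it will drop before the copy loop skips them. The same count-then-filter pattern in a self-contained user-space form (the record layout and the kernel-address test are illustrative stand-ins for the BTS record and kernel_ip(), assuming an x86-64 address split):

#include <stdio.h>
#include <stddef.h>

struct bts_rec { unsigned long from, to, flags; };

/* Stand-in for kernel_ip(): x86-64 kernel addresses have the top bit set. */
static int is_kernel_addr(unsigned long ip)
{
	return (long)ip < 0;
}

/* First pass: count how many records would be dropped. */
static size_t count_skipped(const struct bts_rec *base, const struct bts_rec *top)
{
	const struct bts_rec *at;
	size_t skip = 0;

	for (at = base; at < top; at++)
		if (is_kernel_addr(at->from) || is_kernel_addr(at->to))
			skip++;
	return skip;
}

int main(void)
{
	struct bts_rec buf[] = {
		{ 0x400123, 0x400456, 0 },
		{ 0x400789, 0xffffffff81000000UL, 0 },	/* leaks a kernel address */
		{ 0x400abc, 0x400def, 0 },
	};
	const struct bts_rec *base = buf, *top = buf + 3, *at;
	size_t skip = count_skipped(base, top);

	/* An output buffer could now be sized for exactly (top - base - skip)
	 * records before the second, filtering pass below. */
	printf("emitting %zu of %zu records\n", (top - base) - skip,
	       (size_t)(top - base));

	for (at = base; at < top; at++) {
		if (is_kernel_addr(at->from) || is_kernel_addr(at->to))
			continue;
		printf("  %#lx -> %#lx\n", at->from, at->to);
	}
	return 0;
}
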
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index b2c9475b7ff2..bfd0b717e944 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -151,10 +151,10 @@ static void __intel_pmu_lbr_enable(bool pmi)
151 * No need to reprogram LBR_SELECT in a PMI, as it 151 * No need to reprogram LBR_SELECT in a PMI, as it
152 * did not change. 152 * did not change.
153 */ 153 */
154 if (cpuc->lbr_sel && !pmi) { 154 if (cpuc->lbr_sel)
155 lbr_select = cpuc->lbr_sel->config; 155 lbr_select = cpuc->lbr_sel->config;
156 if (!pmi)
156 wrmsrl(MSR_LBR_SELECT, lbr_select); 157 wrmsrl(MSR_LBR_SELECT, lbr_select);
157 }
158 158
159 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 159 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
160 orig_debugctl = debugctl; 160 orig_debugctl = debugctl;
@@ -555,6 +555,8 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
555 if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP) 555 if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
556 mask |= X86_BR_IND_JMP; 556 mask |= X86_BR_IND_JMP;
557 557
558 if (br_type & PERF_SAMPLE_BRANCH_CALL)
559 mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
558 /* 560 /*
559 * stash actual user request into reg, it may 561 * stash actual user request into reg, it may
560 * be used by fixup code for some CPU 562 * be used by fixup code for some CPU
@@ -890,6 +892,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
890 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL, 892 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
891 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC, 893 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
892 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP, 894 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
895 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
893}; 896};
894 897
895static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = { 898static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -905,6 +908,7 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
905 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL 908 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
906 | LBR_RETURN | LBR_CALL_STACK, 909 | LBR_RETURN | LBR_CALL_STACK,
907 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP, 910 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
911 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
908}; 912};
909 913
910/* core */ 914/* core */
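
With PERF_SAMPLE_BRANCH_CALL wired up to LBR_REL_CALL above, user space can restrict LBR sampling to direct calls. A sketch of the attribute setup only; it assumes a uapi linux/perf_event.h that already carries the PERF_SAMPLE_BRANCH_CALL bit added by this series, the sampled event and period are arbitrary, and opening the event plus consuming the ring buffer are unchanged by this patch and therefore omitted:

#include <string.h>
#include <linux/perf_event.h>

/*
 * Fill a perf_event_attr for cycle-based sampling with branch stacks
 * restricted to user-space direct calls.
 */
static void setup_direct_call_sampling(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size		= sizeof(*attr);
	attr->type		= PERF_TYPE_HARDWARE;
	attr->config		= PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_period	= 100003;
	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	attr->branch_sample_type = PERF_SAMPLE_BRANCH_CALL |
				   PERF_SAMPLE_BRANCH_USER;
	attr->exclude_kernel	= 1;
	attr->disabled		= 1;
}

int main(void)
{
	struct perf_event_attr attr;

	setup_direct_call_sampling(&attr);
	/* The attr would now be passed to perf_event_open() and the ring
	 * buffer mmap()ed; that part is not touched by this patch. */
	return 0;
}
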
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
index 42169283448b..868e1194337f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -139,9 +139,6 @@ static int __init pt_pmu_hw_init(void)
139 long i; 139 long i;
140 140
141 attrs = NULL; 141 attrs = NULL;
142 ret = -ENODEV;
143 if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
144 goto fail;
145 142
146 for (i = 0; i < PT_CPUID_LEAVES; i++) { 143 for (i = 0; i < PT_CPUID_LEAVES; i++) {
147 cpuid_count(20, i, 144 cpuid_count(20, i,
@@ -1130,6 +1127,10 @@ static __init int pt_init(void)
1130 int ret, cpu, prior_warn = 0; 1127 int ret, cpu, prior_warn = 0;
1131 1128
1132 BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE); 1129 BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
1130
1131 if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
1132 return -ENODEV;
1133
1133 get_online_cpus(); 1134 get_online_cpus();
1134 for_each_online_cpu(cpu) { 1135 for_each_online_cpu(cpu) {
1135 u64 ctl; 1136 u64 ctl;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 560e5255b15e..61215a69b03d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -7,7 +7,8 @@ struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
7static bool pcidrv_registered; 7static bool pcidrv_registered;
8struct pci_driver *uncore_pci_driver; 8struct pci_driver *uncore_pci_driver;
9/* pci bus to socket mapping */ 9/* pci bus to socket mapping */
10int uncore_pcibus_to_physid[256] = { [0 ... 255] = -1, }; 10DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
11struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
11struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; 12struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
12 13
13static DEFINE_RAW_SPINLOCK(uncore_box_lock); 14static DEFINE_RAW_SPINLOCK(uncore_box_lock);
@@ -20,6 +21,59 @@ static struct event_constraint uncore_constraint_fixed =
20struct event_constraint uncore_constraint_empty = 21struct event_constraint uncore_constraint_empty =
21 EVENT_CONSTRAINT(0, 0, 0); 22 EVENT_CONSTRAINT(0, 0, 0);
22 23
24int uncore_pcibus_to_physid(struct pci_bus *bus)
25{
26 struct pci2phy_map *map;
27 int phys_id = -1;
28
29 raw_spin_lock(&pci2phy_map_lock);
30 list_for_each_entry(map, &pci2phy_map_head, list) {
31 if (map->segment == pci_domain_nr(bus)) {
32 phys_id = map->pbus_to_physid[bus->number];
33 break;
34 }
35 }
36 raw_spin_unlock(&pci2phy_map_lock);
37
38 return phys_id;
39}
40
41struct pci2phy_map *__find_pci2phy_map(int segment)
42{
43 struct pci2phy_map *map, *alloc = NULL;
44 int i;
45
46 lockdep_assert_held(&pci2phy_map_lock);
47
48lookup:
49 list_for_each_entry(map, &pci2phy_map_head, list) {
50 if (map->segment == segment)
51 goto end;
52 }
53
54 if (!alloc) {
55 raw_spin_unlock(&pci2phy_map_lock);
56 alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
57 raw_spin_lock(&pci2phy_map_lock);
58
59 if (!alloc)
60 return NULL;
61
62 goto lookup;
63 }
64
65 map = alloc;
66 alloc = NULL;
67 map->segment = segment;
68 for (i = 0; i < 256; i++)
69 map->pbus_to_physid[i] = -1;
70 list_add_tail(&map->list, &pci2phy_map_head);
71
72end:
73 kfree(alloc);
74 return map;
75}
76
23ssize_t uncore_event_show(struct kobject *kobj, 77ssize_t uncore_event_show(struct kobject *kobj,
24 struct kobj_attribute *attr, char *buf) 78 struct kobj_attribute *attr, char *buf)
25{ 79{
@@ -809,7 +863,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
809 int phys_id; 863 int phys_id;
810 bool first_box = false; 864 bool first_box = false;
811 865
812 phys_id = uncore_pcibus_to_physid[pdev->bus->number]; 866 phys_id = uncore_pcibus_to_physid(pdev->bus);
813 if (phys_id < 0) 867 if (phys_id < 0)
814 return -ENODEV; 868 return -ENODEV;
815 869
@@ -856,9 +910,10 @@ static void uncore_pci_remove(struct pci_dev *pdev)
856{ 910{
857 struct intel_uncore_box *box = pci_get_drvdata(pdev); 911 struct intel_uncore_box *box = pci_get_drvdata(pdev);
858 struct intel_uncore_pmu *pmu; 912 struct intel_uncore_pmu *pmu;
859 int i, cpu, phys_id = uncore_pcibus_to_physid[pdev->bus->number]; 913 int i, cpu, phys_id;
860 bool last_box = false; 914 bool last_box = false;
861 915
916 phys_id = uncore_pcibus_to_physid(pdev->bus);
862 box = pci_get_drvdata(pdev); 917 box = pci_get_drvdata(pdev);
863 if (!box) { 918 if (!box) {
864 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { 919 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
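
__find_pci2phy_map() above uses the drop-the-lock-to-allocate idiom: release the spinlock, allocate, retake it and redo the lookup, since another CPU may have inserted the same segment in the meantime, and free the unused allocation if it did. The same idiom in a self-contained user-space sketch (a pthread mutex and a toy list stand in for the kernel primitives):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct seg_map {
	struct seg_map *next;
	int segment;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static struct seg_map *map_head;

/* Must be called with map_lock held; may drop and retake it. */
static struct seg_map *find_or_create(int segment)
{
	struct seg_map *map, *alloc = NULL;

lookup:
	for (map = map_head; map; map = map->next)
		if (map->segment == segment)
			goto end;

	if (!alloc) {
		/* Cannot allocate under the lock: drop it, allocate, then
		 * retry the lookup in case someone else raced us. */
		pthread_mutex_unlock(&map_lock);
		alloc = malloc(sizeof(*alloc));
		pthread_mutex_lock(&map_lock);

		if (!alloc)
			return NULL;
		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	map->next = map_head;
	map_head = map;
end:
	free(alloc);	/* non-NULL only if we lost the race to insert */
	return map;
}

int main(void)
{
	pthread_mutex_lock(&map_lock);
	printf("segment 0 -> %p\n", (void *)find_or_create(0));
	printf("segment 0 again -> %p\n", (void *)find_or_create(0));
	pthread_mutex_unlock(&map_lock);
	return 0;
}
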
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 72c54c2e5b1a..2f0a4a98e16b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -117,6 +117,15 @@ struct uncore_event_desc {
117 const char *config; 117 const char *config;
118}; 118};
119 119
120struct pci2phy_map {
121 struct list_head list;
122 int segment;
123 int pbus_to_physid[256];
124};
125
126int uncore_pcibus_to_physid(struct pci_bus *bus);
127struct pci2phy_map *__find_pci2phy_map(int segment);
128
120ssize_t uncore_event_show(struct kobject *kobj, 129ssize_t uncore_event_show(struct kobject *kobj,
121 struct kobj_attribute *attr, char *buf); 130 struct kobj_attribute *attr, char *buf);
122 131
@@ -317,7 +326,8 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
317extern struct intel_uncore_type **uncore_msr_uncores; 326extern struct intel_uncore_type **uncore_msr_uncores;
318extern struct intel_uncore_type **uncore_pci_uncores; 327extern struct intel_uncore_type **uncore_pci_uncores;
319extern struct pci_driver *uncore_pci_driver; 328extern struct pci_driver *uncore_pci_driver;
320extern int uncore_pcibus_to_physid[256]; 329extern raw_spinlock_t pci2phy_map_lock;
330extern struct list_head pci2phy_map_head;
321extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; 331extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
322extern struct event_constraint uncore_constraint_empty; 332extern struct event_constraint uncore_constraint_empty;
323 333
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
index f78574b3cb55..845256158a10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -420,15 +420,25 @@ static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
420static int snb_pci2phy_map_init(int devid) 420static int snb_pci2phy_map_init(int devid)
421{ 421{
422 struct pci_dev *dev = NULL; 422 struct pci_dev *dev = NULL;
423 int bus; 423 struct pci2phy_map *map;
424 int bus, segment;
424 425
425 dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev); 426 dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
426 if (!dev) 427 if (!dev)
427 return -ENOTTY; 428 return -ENOTTY;
428 429
429 bus = dev->bus->number; 430 bus = dev->bus->number;
430 431 segment = pci_domain_nr(dev->bus);
431 uncore_pcibus_to_physid[bus] = 0; 432
433 raw_spin_lock(&pci2phy_map_lock);
434 map = __find_pci2phy_map(segment);
435 if (!map) {
436 raw_spin_unlock(&pci2phy_map_lock);
437 pci_dev_put(dev);
438 return -ENOMEM;
439 }
440 map->pbus_to_physid[bus] = 0;
441 raw_spin_unlock(&pci2phy_map_lock);
432 442
433 pci_dev_put(dev); 443 pci_dev_put(dev);
434 444
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 694510a887dc..f0f4fcba252e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -1087,7 +1087,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
1087static int snbep_pci2phy_map_init(int devid) 1087static int snbep_pci2phy_map_init(int devid)
1088{ 1088{
1089 struct pci_dev *ubox_dev = NULL; 1089 struct pci_dev *ubox_dev = NULL;
1090 int i, bus, nodeid; 1090 int i, bus, nodeid, segment;
1091 struct pci2phy_map *map;
1091 int err = 0; 1092 int err = 0;
1092 u32 config = 0; 1093 u32 config = 0;
1093 1094
@@ -1106,16 +1107,27 @@ static int snbep_pci2phy_map_init(int devid)
1106 err = pci_read_config_dword(ubox_dev, 0x54, &config); 1107 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1107 if (err) 1108 if (err)
1108 break; 1109 break;
1110
1111 segment = pci_domain_nr(ubox_dev->bus);
1112 raw_spin_lock(&pci2phy_map_lock);
1113 map = __find_pci2phy_map(segment);
1114 if (!map) {
1115 raw_spin_unlock(&pci2phy_map_lock);
1116 err = -ENOMEM;
1117 break;
1118 }
1119
1109 /* 1120 /*
1110 * every three bits in the Node ID mapping register maps 1121 * every three bits in the Node ID mapping register maps
1111 * to a particular node. 1122 * to a particular node.
1112 */ 1123 */
1113 for (i = 0; i < 8; i++) { 1124 for (i = 0; i < 8; i++) {
1114 if (nodeid == ((config >> (3 * i)) & 0x7)) { 1125 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1115 uncore_pcibus_to_physid[bus] = i; 1126 map->pbus_to_physid[bus] = i;
1116 break; 1127 break;
1117 } 1128 }
1118 } 1129 }
1130 raw_spin_unlock(&pci2phy_map_lock);
1119 } 1131 }
1120 1132
1121 if (!err) { 1133 if (!err) {
@@ -1123,13 +1135,17 @@ static int snbep_pci2phy_map_init(int devid)
1123 * For PCI bus with no UBOX device, find the next bus 1135 * For PCI bus with no UBOX device, find the next bus
1124 * that has UBOX device and use its mapping. 1136 * that has UBOX device and use its mapping.
1125 */ 1137 */
1126 i = -1; 1138 raw_spin_lock(&pci2phy_map_lock);
1127 for (bus = 255; bus >= 0; bus--) { 1139 list_for_each_entry(map, &pci2phy_map_head, list) {
1128 if (uncore_pcibus_to_physid[bus] >= 0) 1140 i = -1;
1129 i = uncore_pcibus_to_physid[bus]; 1141 for (bus = 255; bus >= 0; bus--) {
1130 else 1142 if (map->pbus_to_physid[bus] >= 0)
1131 uncore_pcibus_to_physid[bus] = i; 1143 i = map->pbus_to_physid[bus];
1144 else
1145 map->pbus_to_physid[bus] = i;
1146 }
1132 } 1147 }
1148 raw_spin_unlock(&pci2phy_map_lock);
1133 } 1149 }
1134 1150
1135 pci_dev_put(ubox_dev); 1151 pci_dev_put(ubox_dev);
@@ -2444,7 +2460,7 @@ static struct intel_uncore_type *bdx_pci_uncores[] = {
2444 NULL, 2460 NULL,
2445}; 2461};
2446 2462
2447static DEFINE_PCI_DEVICE_TABLE(bdx_uncore_pci_ids) = { 2463static const struct pci_device_id bdx_uncore_pci_ids[] = {
2448 { /* Home Agent 0 */ 2464 { /* Home Agent 0 */
2449 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30), 2465 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
2450 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0), 2466 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 74ca2fe7a0b3..2c1910f6717e 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -75,8 +75,6 @@ struct crash_memmap_data {
75 unsigned int type; 75 unsigned int type;
76}; 76};
77 77
78int in_crash_kexec;
79
80/* 78/*
81 * This is used to VMCLEAR all VMCSs loaded on the 79 * This is used to VMCLEAR all VMCSs loaded on the
82 * processor. And when loading kvm_intel module, the 80 * processor. And when loading kvm_intel module, the
@@ -132,7 +130,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
132 130
133static void kdump_nmi_shootdown_cpus(void) 131static void kdump_nmi_shootdown_cpus(void)
134{ 132{
135 in_crash_kexec = 1;
136 nmi_shootdown_cpus(kdump_nmi_callback); 133 nmi_shootdown_cpus(kdump_nmi_callback);
137 134
138 disable_local_APIC(); 135 disable_local_APIC();
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a102564d08eb..569c1e4f96fe 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -911,7 +911,7 @@ void __init finish_e820_parsing(void)
911 } 911 }
912} 912}
913 913
914static inline const char *e820_type_to_string(int e820_type) 914static const char *e820_type_to_string(int e820_type)
915{ 915{
916 switch (e820_type) { 916 switch (e820_type) {
917 case E820_RESERVED_KERN: 917 case E820_RESERVED_KERN:
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 9f9cc682e561..db9a675e751b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -584,7 +584,7 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
584static void __init force_disable_hpet(int num, int slot, int func) 584static void __init force_disable_hpet(int num, int slot, int func)
585{ 585{
586#ifdef CONFIG_HPET_TIMER 586#ifdef CONFIG_HPET_TIMER
587 boot_hpet_disable = 1; 587 boot_hpet_disable = true;
588 pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n"); 588 pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
589#endif 589#endif
590} 590}
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index eec40f595ab9..21bf92490a7b 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -195,14 +195,14 @@ static __init void early_serial_init(char *s)
195#ifdef CONFIG_PCI 195#ifdef CONFIG_PCI
196static void mem32_serial_out(unsigned long addr, int offset, int value) 196static void mem32_serial_out(unsigned long addr, int offset, int value)
197{ 197{
198 u32 *vaddr = (u32 *)addr; 198 u32 __iomem *vaddr = (u32 __iomem *)addr;
199 /* shift implied by pointer type */ 199 /* shift implied by pointer type */
200 writel(value, vaddr + offset); 200 writel(value, vaddr + offset);
201} 201}
202 202
203static unsigned int mem32_serial_in(unsigned long addr, int offset) 203static unsigned int mem32_serial_in(unsigned long addr, int offset)
204{ 204{
205 u32 *vaddr = (u32 *)addr; 205 u32 __iomem *vaddr = (u32 __iomem *)addr;
206 /* shift implied by pointer type */ 206 /* shift implied by pointer type */
207 return readl(vaddr + offset); 207 return readl(vaddr + offset);
208} 208}
@@ -316,7 +316,7 @@ static struct console early_serial_console = {
316 .index = -1, 316 .index = -1,
317}; 317};
318 318
319static inline void early_console_register(struct console *con, int keep_early) 319static void early_console_register(struct console *con, int keep_early)
320{ 320{
321 if (con->index != -1) { 321 if (con->index != -1) {
322 printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", 322 printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 0e2d96ffd158..6bc9ae24b6d2 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -152,7 +152,7 @@ ENTRY(startup_32)
152 movl %eax, pa(olpc_ofw_pgd) 152 movl %eax, pa(olpc_ofw_pgd)
153#endif 153#endif
154 154
155#ifdef CONFIG_MICROCODE_EARLY 155#ifdef CONFIG_MICROCODE
156 /* Early load ucode on BSP. */ 156 /* Early load ucode on BSP. */
157 call load_ucode_bsp 157 call load_ucode_bsp
158#endif 158#endif
@@ -311,12 +311,11 @@ ENTRY(startup_32_smp)
311 movl %eax,%ss 311 movl %eax,%ss
312 leal -__PAGE_OFFSET(%ecx),%esp 312 leal -__PAGE_OFFSET(%ecx),%esp
313 313
314#ifdef CONFIG_MICROCODE_EARLY 314#ifdef CONFIG_MICROCODE
315 /* Early load ucode on AP. */ 315 /* Early load ucode on AP. */
316 call load_ucode_ap 316 call load_ucode_ap
317#endif 317#endif
318 318
319
320default_entry: 319default_entry:
321#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ 320#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
322 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ 321 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 88b4da373081..b8e6ff5cd5d0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -37,10 +37,10 @@
37 */ 37 */
38unsigned long hpet_address; 38unsigned long hpet_address;
39u8 hpet_blockid; /* OS timer block num */ 39u8 hpet_blockid; /* OS timer block num */
40u8 hpet_msi_disable; 40bool hpet_msi_disable;
41 41
42#ifdef CONFIG_PCI_MSI 42#ifdef CONFIG_PCI_MSI
43static unsigned long hpet_num_timers; 43static unsigned int hpet_num_timers;
44#endif 44#endif
45static void __iomem *hpet_virt_address; 45static void __iomem *hpet_virt_address;
46 46
@@ -86,9 +86,9 @@ static inline void hpet_clear_mapping(void)
86/* 86/*
87 * HPET command line enable / disable 87 * HPET command line enable / disable
88 */ 88 */
89int boot_hpet_disable; 89bool boot_hpet_disable;
90int hpet_force_user; 90bool hpet_force_user;
91static int hpet_verbose; 91static bool hpet_verbose;
92 92
93static int __init hpet_setup(char *str) 93static int __init hpet_setup(char *str)
94{ 94{
@@ -98,11 +98,11 @@ static int __init hpet_setup(char *str)
98 if (next) 98 if (next)
99 *next++ = 0; 99 *next++ = 0;
100 if (!strncmp("disable", str, 7)) 100 if (!strncmp("disable", str, 7))
101 boot_hpet_disable = 1; 101 boot_hpet_disable = true;
102 if (!strncmp("force", str, 5)) 102 if (!strncmp("force", str, 5))
103 hpet_force_user = 1; 103 hpet_force_user = true;
104 if (!strncmp("verbose", str, 7)) 104 if (!strncmp("verbose", str, 7))
105 hpet_verbose = 1; 105 hpet_verbose = true;
106 str = next; 106 str = next;
107 } 107 }
108 return 1; 108 return 1;
@@ -111,7 +111,7 @@ __setup("hpet=", hpet_setup);
111 111
112static int __init disable_hpet(char *str) 112static int __init disable_hpet(char *str)
113{ 113{
114 boot_hpet_disable = 1; 114 boot_hpet_disable = true;
115 return 1; 115 return 1;
116} 116}
117__setup("nohpet", disable_hpet); 117__setup("nohpet", disable_hpet);
@@ -124,7 +124,7 @@ static inline int is_hpet_capable(void)
124/* 124/*
125 * HPET timer interrupt enable / disable 125 * HPET timer interrupt enable / disable
126 */ 126 */
127static int hpet_legacy_int_enabled; 127static bool hpet_legacy_int_enabled;
128 128
129/** 129/**
130 * is_hpet_enabled - check whether the hpet timer interrupt is enabled 130 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
@@ -230,7 +230,7 @@ static struct clock_event_device hpet_clockevent;
230 230
231static void hpet_stop_counter(void) 231static void hpet_stop_counter(void)
232{ 232{
233 unsigned long cfg = hpet_readl(HPET_CFG); 233 u32 cfg = hpet_readl(HPET_CFG);
234 cfg &= ~HPET_CFG_ENABLE; 234 cfg &= ~HPET_CFG_ENABLE;
235 hpet_writel(cfg, HPET_CFG); 235 hpet_writel(cfg, HPET_CFG);
236} 236}
@@ -272,7 +272,7 @@ static void hpet_enable_legacy_int(void)
272 272
273 cfg |= HPET_CFG_LEGACY; 273 cfg |= HPET_CFG_LEGACY;
274 hpet_writel(cfg, HPET_CFG); 274 hpet_writel(cfg, HPET_CFG);
275 hpet_legacy_int_enabled = 1; 275 hpet_legacy_int_enabled = true;
276} 276}
277 277
278static void hpet_legacy_clockevent_register(void) 278static void hpet_legacy_clockevent_register(void)
@@ -983,7 +983,7 @@ void hpet_disable(void)
983 cfg = *hpet_boot_cfg; 983 cfg = *hpet_boot_cfg;
984 else if (hpet_legacy_int_enabled) { 984 else if (hpet_legacy_int_enabled) {
985 cfg &= ~HPET_CFG_LEGACY; 985 cfg &= ~HPET_CFG_LEGACY;
986 hpet_legacy_int_enabled = 0; 986 hpet_legacy_int_enabled = false;
987 } 987 }
988 cfg &= ~HPET_CFG_ENABLE; 988 cfg &= ~HPET_CFG_ENABLE;
989 hpet_writel(cfg, HPET_CFG); 989 hpet_writel(cfg, HPET_CFG);
@@ -1121,8 +1121,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
1121 1121
1122static void hpet_disable_rtc_channel(void) 1122static void hpet_disable_rtc_channel(void)
1123{ 1123{
1124 unsigned long cfg; 1124 u32 cfg = hpet_readl(HPET_T1_CFG);
1125 cfg = hpet_readl(HPET_T1_CFG);
1126 cfg &= ~HPET_TN_ENABLE; 1125 cfg &= ~HPET_TN_ENABLE;
1127 hpet_writel(cfg, HPET_T1_CFG); 1126 hpet_writel(cfg, HPET_T1_CFG);
1128} 1127}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index c767cf2bc80a..206d0b90a3ab 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -72,7 +72,7 @@ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
72{ 72{
73 stack_overflow_check(regs); 73 stack_overflow_check(regs);
74 74
75 if (unlikely(IS_ERR_OR_NULL(desc))) 75 if (IS_ERR_OR_NULL(desc))
76 return false; 76 return false;
77 77
78 generic_handle_irq_desc(desc); 78 generic_handle_irq_desc(desc);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1b55de1267cf..cd99433b8ba1 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -131,11 +131,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
131 131
132bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) 132bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
133{ 133{
134 if (!*dev)
135 *dev = &x86_dma_fallback_dev;
136
134 *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 137 *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
135 *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); 138 *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
136 139
137 if (!*dev)
138 *dev = &x86_dma_fallback_dev;
139 if (!is_device_dma_capable(*dev)) 140 if (!is_device_dma_capable(*dev))
140 return false; 141 return false;
141 return true; 142 return true;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 39e585a554b7..9f7c21c22477 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -84,6 +84,9 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
84int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 84int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
85{ 85{
86 memcpy(dst, src, arch_task_struct_size); 86 memcpy(dst, src, arch_task_struct_size);
87#ifdef CONFIG_VM86
88 dst->thread.vm86 = NULL;
89#endif
87 90
88 return fpu__copy(&dst->thread.fpu, &src->thread.fpu); 91 return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
89} 92}
@@ -550,14 +553,14 @@ unsigned long get_wchan(struct task_struct *p)
550 if (sp < bottom || sp > top) 553 if (sp < bottom || sp > top)
551 return 0; 554 return 0;
552 555
553 fp = READ_ONCE(*(unsigned long *)sp); 556 fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
554 do { 557 do {
555 if (fp < bottom || fp > top) 558 if (fp < bottom || fp > top)
556 return 0; 559 return 0;
557 ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); 560 ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
558 if (!in_sched_functions(ip)) 561 if (!in_sched_functions(ip))
559 return ip; 562 return ip;
560 fp = READ_ONCE(*(unsigned long *)fp); 563 fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
561 } while (count++ < 16 && p->state != TASK_RUNNING); 564 } while (count++ < 16 && p->state != TASK_RUNNING);
562 return 0; 565 return 0;
563} 566}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 737527b40e5b..9f950917528b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -280,14 +280,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
280 set_iopl_mask(next->iopl); 280 set_iopl_mask(next->iopl);
281 281
282 /* 282 /*
283 * If it were not for PREEMPT_ACTIVE we could guarantee that the
284 * preempt_count of all tasks was equal here and this would not be
285 * needed.
286 */
287 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
288 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
289
290 /*
291 * Now maybe handle debug registers and/or IO bitmaps 283 * Now maybe handle debug registers and/or IO bitmaps
292 */ 284 */
293 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || 285 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b35921a670b2..e835d263a33b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -332,7 +332,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
332 /* 332 /*
333 * Switch FS and GS. 333 * Switch FS and GS.
334 * 334 *
335 * These are even more complicated than FS and GS: they have 335 * These are even more complicated than DS and ES: they have
336 * 64-bit bases that are controlled by arch_prctl. Those bases 336 * 64-bit bases that are controlled by arch_prctl. Those bases
337 * only differ from the values in the GDT or LDT if the selector 337 * only differ from the values in the GDT or LDT if the selector
338 * is 0. 338 * is 0.
@@ -401,14 +401,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
401 */ 401 */
402 this_cpu_write(current_task, next_p); 402 this_cpu_write(current_task, next_p);
403 403
404 /*
405 * If it were not for PREEMPT_ACTIVE we could guarantee that the
406 * preempt_count of all tasks was equal here and this would not be
407 * needed.
408 */
409 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
410 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
411
412 /* Reload esp0 and ss1. This changes current_thread_info(). */ 404 /* Reload esp0 and ss1. This changes current_thread_info(). */
413 load_sp0(tss, next); 405 load_sp0(tss, next);
414 406
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 176a0f99d4da..cc457ff818ad 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -524,7 +524,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
524 */ 524 */
525static void force_disable_hpet_msi(struct pci_dev *unused) 525static void force_disable_hpet_msi(struct pci_dev *unused)
526{ 526{
527 hpet_msi_disable = 1; 527 hpet_msi_disable = true;
528} 528}
529 529
530DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, 530DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index fdb7f2a2d328..a1e4da98c8f0 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -111,6 +111,7 @@
111#include <asm/mce.h> 111#include <asm/mce.h>
112#include <asm/alternative.h> 112#include <asm/alternative.h>
113#include <asm/prom.h> 113#include <asm/prom.h>
114#include <asm/microcode.h>
114 115
115/* 116/*
116 * max_low_pfn_mapped: highest direct mapped pfn under 4GB 117 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -480,34 +481,34 @@ static void __init memblock_x86_reserve_range_setup_data(void)
480 481
481#ifdef CONFIG_KEXEC_CORE 482#ifdef CONFIG_KEXEC_CORE
482 483
484/* 16M alignment for crash kernel regions */
485#define CRASH_ALIGN (16 << 20)
486
483/* 487/*
484 * Keep the crash kernel below this limit. On 32 bits earlier kernels 488 * Keep the crash kernel below this limit. On 32 bits earlier kernels
485 * would limit the kernel to the low 512 MiB due to mapping restrictions. 489 * would limit the kernel to the low 512 MiB due to mapping restrictions.
 486 * On 64-bit, old kexec-tools need it to be under 896MiB. 490 * On 64-bit, old kexec-tools need it to be under 896MiB.
487 */ 491 */
488#ifdef CONFIG_X86_32 492#ifdef CONFIG_X86_32
489# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) 493# define CRASH_ADDR_LOW_MAX (512 << 20)
490# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) 494# define CRASH_ADDR_HIGH_MAX (512 << 20)
491#else 495#else
492# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) 496# define CRASH_ADDR_LOW_MAX (896UL << 20)
493# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM 497# define CRASH_ADDR_HIGH_MAX MAXMEM
494#endif 498#endif
495 499
496static void __init reserve_crashkernel_low(void) 500static int __init reserve_crashkernel_low(void)
497{ 501{
498#ifdef CONFIG_X86_64 502#ifdef CONFIG_X86_64
499 const unsigned long long alignment = 16<<20; /* 16M */ 503 unsigned long long base, low_base = 0, low_size = 0;
500 unsigned long long low_base = 0, low_size = 0;
501 unsigned long total_low_mem; 504 unsigned long total_low_mem;
502 unsigned long long base;
503 bool auto_set = false;
504 int ret; 505 int ret;
505 506
506 total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); 507 total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));
508
507 /* crashkernel=Y,low */ 509 /* crashkernel=Y,low */
508 ret = parse_crashkernel_low(boot_command_line, total_low_mem, 510 ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
509 &low_size, &base); 511 if (ret) {
510 if (ret != 0) {
511 /* 512 /*
512 * two parts from lib/swiotlb.c: 513 * two parts from lib/swiotlb.c:
513 * -swiotlb size: user-specified with swiotlb= or default. 514 * -swiotlb size: user-specified with swiotlb= or default.
@@ -517,52 +518,52 @@ static void __init reserve_crashkernel_low(void)
517 * make sure we allocate enough extra low memory so that we 518 * make sure we allocate enough extra low memory so that we
518 * don't run out of DMA buffers for 32-bit devices. 519 * don't run out of DMA buffers for 32-bit devices.
519 */ 520 */
520 low_size = max(swiotlb_size_or_default() + (8UL<<20), 256UL<<20); 521 low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
521 auto_set = true;
522 } else { 522 } else {
523 /* passed with crashkernel=0,low ? */ 523 /* passed with crashkernel=0,low ? */
524 if (!low_size) 524 if (!low_size)
525 return; 525 return 0;
526 } 526 }
527 527
528 low_base = memblock_find_in_range(low_size, (1ULL<<32), 528 low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
529 low_size, alignment);
530
531 if (!low_base) { 529 if (!low_base) {
532 if (!auto_set) 530 pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
533 pr_info("crashkernel low reservation failed - No suitable area found.\n"); 531 (unsigned long)(low_size >> 20));
532 return -ENOMEM;
533 }
534 534
535 return; 535 ret = memblock_reserve(low_base, low_size);
536 if (ret) {
537 pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
538 return ret;
536 } 539 }
537 540
538 memblock_reserve(low_base, low_size);
539 pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n", 541 pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
540 (unsigned long)(low_size >> 20), 542 (unsigned long)(low_size >> 20),
541 (unsigned long)(low_base >> 20), 543 (unsigned long)(low_base >> 20),
542 (unsigned long)(total_low_mem >> 20)); 544 (unsigned long)(total_low_mem >> 20));
545
543 crashk_low_res.start = low_base; 546 crashk_low_res.start = low_base;
544 crashk_low_res.end = low_base + low_size - 1; 547 crashk_low_res.end = low_base + low_size - 1;
545 insert_resource(&iomem_resource, &crashk_low_res); 548 insert_resource(&iomem_resource, &crashk_low_res);
546#endif 549#endif
550 return 0;
547} 551}
548 552
549static void __init reserve_crashkernel(void) 553static void __init reserve_crashkernel(void)
550{ 554{
551 const unsigned long long alignment = 16<<20; /* 16M */ 555 unsigned long long crash_size, crash_base, total_mem;
552 unsigned long long total_mem;
553 unsigned long long crash_size, crash_base;
554 bool high = false; 556 bool high = false;
555 int ret; 557 int ret;
556 558
557 total_mem = memblock_phys_mem_size(); 559 total_mem = memblock_phys_mem_size();
558 560
559 /* crashkernel=XM */ 561 /* crashkernel=XM */
560 ret = parse_crashkernel(boot_command_line, total_mem, 562 ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
561 &crash_size, &crash_base);
562 if (ret != 0 || crash_size <= 0) { 563 if (ret != 0 || crash_size <= 0) {
563 /* crashkernel=X,high */ 564 /* crashkernel=X,high */
564 ret = parse_crashkernel_high(boot_command_line, total_mem, 565 ret = parse_crashkernel_high(boot_command_line, total_mem,
565 &crash_size, &crash_base); 566 &crash_size, &crash_base);
566 if (ret != 0 || crash_size <= 0) 567 if (ret != 0 || crash_size <= 0)
567 return; 568 return;
568 high = true; 569 high = true;
@@ -573,11 +574,10 @@ static void __init reserve_crashkernel(void)
573 /* 574 /*
 574 * kexec wants the bzImage below CRASH_KERNEL_ADDR_MAX 575 * kexec wants the bzImage below CRASH_KERNEL_ADDR_MAX
575 */ 576 */
576 crash_base = memblock_find_in_range(alignment, 577 crash_base = memblock_find_in_range(CRASH_ALIGN,
577 high ? CRASH_KERNEL_ADDR_HIGH_MAX : 578 high ? CRASH_ADDR_HIGH_MAX
578 CRASH_KERNEL_ADDR_LOW_MAX, 579 : CRASH_ADDR_LOW_MAX,
579 crash_size, alignment); 580 crash_size, CRASH_ALIGN);
580
581 if (!crash_base) { 581 if (!crash_base) {
582 pr_info("crashkernel reservation failed - No suitable area found.\n"); 582 pr_info("crashkernel reservation failed - No suitable area found.\n");
583 return; 583 return;
@@ -587,26 +587,32 @@ static void __init reserve_crashkernel(void)
587 unsigned long long start; 587 unsigned long long start;
588 588
589 start = memblock_find_in_range(crash_base, 589 start = memblock_find_in_range(crash_base,
590 crash_base + crash_size, crash_size, 1<<20); 590 crash_base + crash_size,
591 crash_size, 1 << 20);
591 if (start != crash_base) { 592 if (start != crash_base) {
592 pr_info("crashkernel reservation failed - memory is in use.\n"); 593 pr_info("crashkernel reservation failed - memory is in use.\n");
593 return; 594 return;
594 } 595 }
595 } 596 }
596 memblock_reserve(crash_base, crash_size); 597 ret = memblock_reserve(crash_base, crash_size);
598 if (ret) {
599 pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
600 return;
601 }
597 602
598 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " 603 if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
599 "for crashkernel (System RAM: %ldMB)\n", 604 memblock_free(crash_base, crash_size);
600 (unsigned long)(crash_size >> 20), 605 return;
601 (unsigned long)(crash_base >> 20), 606 }
602 (unsigned long)(total_mem >> 20)); 607
608 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
609 (unsigned long)(crash_size >> 20),
610 (unsigned long)(crash_base >> 20),
611 (unsigned long)(total_mem >> 20));
603 612
604 crashk_res.start = crash_base; 613 crashk_res.start = crash_base;
605 crashk_res.end = crash_base + crash_size - 1; 614 crashk_res.end = crash_base + crash_size - 1;
606 insert_resource(&iomem_resource, &crashk_res); 615 insert_resource(&iomem_resource, &crashk_res);
607
608 if (crash_base >= (1ULL<<32))
609 reserve_crashkernel_low();
610} 616}
611#else 617#else
612static void __init reserve_crashkernel(void) 618static void __init reserve_crashkernel(void)
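
The reworked reserve_crashkernel() above centralizes the 16M alignment in CRASH_ALIGN and makes the low reservation (kept under 4 GB so swiotlb and 32-bit DMA still have buffers when the crash kernel itself sits high) report failures. A small standalone sketch of the two bits of arithmetic involved: parsing a crashkernel= size with K/M/G suffixes (loosely modelled on memparse(); parse_size() and the sample address are illustrative, not kernel API) and rounding a candidate base down to the 16 MB alignment.

#include <stdio.h>
#include <stdlib.h>

#define CRASH_ALIGN	(16ULL << 20)	/* 16M alignment, as in the hunk above */

/* Accept "64K" / "256M" / "2G" style sizes. */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g':
		v <<= 10;
		/* fall through */
	case 'M': case 'm':
		v <<= 10;
		/* fall through */
	case 'K': case 'k':
		v <<= 10;
	}
	return v;
}

static unsigned long long align_down(unsigned long long addr, unsigned long long align)
{
	return addr & ~(align - 1);
}

int main(void)
{
	unsigned long long low_size = parse_size("256M");
	unsigned long long base = align_down(0x7f2345678ULL, CRASH_ALIGN);

	printf("low_size = %lluMB, base = %#llx (16MB aligned)\n",
	       low_size >> 20, base);
	return 0;
}
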
@@ -1079,8 +1085,10 @@ void __init setup_arch(char **cmdline_p)
1079 memblock_set_current_limit(ISA_END_ADDRESS); 1085 memblock_set_current_limit(ISA_END_ADDRESS);
1080 memblock_x86_fill(); 1086 memblock_x86_fill();
1081 1087
1082 if (efi_enabled(EFI_BOOT)) 1088 if (efi_enabled(EFI_BOOT)) {
1089 efi_fake_memmap();
1083 efi_find_mirror(); 1090 efi_find_mirror();
1091 }
1084 1092
1085 /* 1093 /*
1086 * The EFI specification says that boot service code won't be called 1094 * The EFI specification says that boot service code won't be called
@@ -1173,6 +1181,14 @@ void __init setup_arch(char **cmdline_p)
1173 clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, 1181 clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
1174 swapper_pg_dir + KERNEL_PGD_BOUNDARY, 1182 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
1175 KERNEL_PGD_PTRS); 1183 KERNEL_PGD_PTRS);
1184
1185 /*
1186 * sync back low identity map too. It is used for example
1187 * in the 32-bit EFI stub.
1188 */
1189 clone_pgd_range(initial_page_table,
1190 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
1191 KERNEL_PGD_PTRS);
1176#endif 1192#endif
1177 1193
1178 tboot_probe(); 1194 tboot_probe();
@@ -1234,6 +1250,8 @@ void __init setup_arch(char **cmdline_p)
1234 if (efi_enabled(EFI_BOOT)) 1250 if (efi_enabled(EFI_BOOT))
1235 efi_apply_memmap_quirks(); 1251 efi_apply_memmap_quirks();
1236#endif 1252#endif
1253
1254 microcode_init();
1237} 1255}
1238 1256
1239#ifdef CONFIG_X86_32 1257#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index da52e6bb5c7f..32165d649979 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
196 return sp; 196 return sp;
197} 197}
198 198
199static inline void __user * 199static void __user *
200get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, 200get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
201 void __user **fpstate) 201 void __user **fpstate)
202{ 202{
@@ -299,7 +299,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
299 299
300 if (current->mm->context.vdso) 300 if (current->mm->context.vdso)
301 restorer = current->mm->context.vdso + 301 restorer = current->mm->context.vdso +
302 selected_vdso32->sym___kernel_sigreturn; 302 vdso_image_32.sym___kernel_sigreturn;
303 else 303 else
304 restorer = &frame->retcode; 304 restorer = &frame->retcode;
305 if (ksig->ka.sa.sa_flags & SA_RESTORER) 305 if (ksig->ka.sa.sa_flags & SA_RESTORER)
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
363 363
364 /* Set up to return from userspace. */ 364 /* Set up to return from userspace. */
365 restorer = current->mm->context.vdso + 365 restorer = current->mm->context.vdso +
366 selected_vdso32->sym___kernel_rt_sigreturn; 366 vdso_image_32.sym___kernel_rt_sigreturn;
367 if (ksig->ka.sa.sa_flags & SA_RESTORER) 367 if (ksig->ka.sa.sa_flags & SA_RESTORER)
368 restorer = ksig->ka.sa.sa_restorer; 368 restorer = ksig->ka.sa.sa_restorer;
369 put_user_ex(restorer, &frame->pretcode); 369 put_user_ex(restorer, &frame->pretcode);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e0c198e5f920..892ee2e5ecbc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
509 */ 509 */
510#define UDELAY_10MS_DEFAULT 10000 510#define UDELAY_10MS_DEFAULT 10000
511 511
512static unsigned int init_udelay = UDELAY_10MS_DEFAULT; 512static unsigned int init_udelay = INT_MAX;
513 513
514static int __init cpu_init_udelay(char *str) 514static int __init cpu_init_udelay(char *str)
515{ 515{
@@ -522,13 +522,16 @@ early_param("cpu_init_udelay", cpu_init_udelay);
522static void __init smp_quirk_init_udelay(void) 522static void __init smp_quirk_init_udelay(void)
523{ 523{
524 /* if cmdline changed it from default, leave it alone */ 524 /* if cmdline changed it from default, leave it alone */
525 if (init_udelay != UDELAY_10MS_DEFAULT) 525 if (init_udelay != INT_MAX)
526 return; 526 return;
527 527
528 /* if modern processor, use no delay */ 528 /* if modern processor, use no delay */
529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || 529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) 530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
531 init_udelay = 0; 531 init_udelay = 0;
532
533 /* else, use legacy delay */
534 init_udelay = UDELAY_10MS_DEFAULT;
532} 535}
533 536
534/* 537/*
@@ -657,7 +660,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
657 /* 660 /*
658 * Give the other CPU some time to accept the IPI. 661 * Give the other CPU some time to accept the IPI.
659 */ 662 */
660 if (init_udelay) 663 if (init_udelay == 0)
664 udelay(10);
665 else
661 udelay(300); 666 udelay(300);
662 667
663 pr_debug("Startup point 1\n"); 668 pr_debug("Startup point 1\n");
@@ -668,7 +673,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
668 /* 673 /*
669 * Give the other CPU some time to accept the IPI. 674 * Give the other CPU some time to accept the IPI.
670 */ 675 */
671 if (init_udelay) 676 if (init_udelay == 0)
677 udelay(10);
678 else
672 udelay(200); 679 udelay(200);
673 680
674 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 681 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
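
The INT_MAX initializer in the smpboot.c hunk above is a sentinel: it lets smp_quirk_init_udelay() distinguish "cpu_init_udelay= was given on the command line" from "still at the compiled-in default", and only in the latter case pick a delay based on the CPU family. A minimal sketch of that sentinel pattern; the parse_cmdline() and quirk_init_udelay() helpers here are illustrative stand-ins, not kernel functions.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define UDELAY_10MS_DEFAULT	10000U		/* legacy 10 ms delay */

static unsigned int init_udelay = INT_MAX;	/* sentinel: "not set on the command line" */

/* Stand-in for the early_param() hook that parses cpu_init_udelay= */
static void parse_cmdline(const char *arg)
{
	if (arg)
		init_udelay = (unsigned int)strtoul(arg, NULL, 0);
}

static void quirk_init_udelay(int modern_cpu)
{
	if (init_udelay != INT_MAX)		/* user chose a value: leave it alone */
		return;
	init_udelay = modern_cpu ? 0 : UDELAY_10MS_DEFAULT;
}

int main(void)
{
	parse_cmdline(NULL);			/* nothing passed on the command line */
	quirk_init_udelay(1);
	printf("init_udelay = %u\n", init_udelay);	/* 0 on a modern CPU */
	return 0;
}
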
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c3f7602cd038..c7c4d9c51e99 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -168,21 +168,20 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
168 * ns = cycles * cyc2ns_scale / SC 168 * ns = cycles * cyc2ns_scale / SC
169 * 169 *
170 * And since SC is a constant power of two, we can convert the div 170 * And since SC is a constant power of two, we can convert the div
171 * into a shift. 171 * into a shift. The larger SC is, the more accurate the conversion, but
172 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
173 * (64-bit result) can be used.
172 * 174 *
173 * We can use khz divisor instead of mhz to keep a better precision, since 175 * We can use khz divisor instead of mhz to keep a better precision.
174 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
175 * (mathieu.desnoyers@polymtl.ca) 176 * (mathieu.desnoyers@polymtl.ca)
176 * 177 *
177 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 178 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
178 */ 179 */
179 180
180#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
181
182static void cyc2ns_data_init(struct cyc2ns_data *data) 181static void cyc2ns_data_init(struct cyc2ns_data *data)
183{ 182{
184 data->cyc2ns_mul = 0; 183 data->cyc2ns_mul = 0;
185 data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; 184 data->cyc2ns_shift = 0;
186 data->cyc2ns_offset = 0; 185 data->cyc2ns_offset = 0;
187 data->__count = 0; 186 data->__count = 0;
188} 187}
@@ -216,14 +215,14 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
216 215
217 if (likely(data == tail)) { 216 if (likely(data == tail)) {
218 ns = data->cyc2ns_offset; 217 ns = data->cyc2ns_offset;
219 ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); 218 ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
220 } else { 219 } else {
221 data->__count++; 220 data->__count++;
222 221
223 barrier(); 222 barrier();
224 223
225 ns = data->cyc2ns_offset; 224 ns = data->cyc2ns_offset;
226 ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); 225 ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
227 226
228 barrier(); 227 barrier();
229 228
@@ -257,12 +256,22 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
257 * time function is continuous; see the comment near struct 256 * time function is continuous; see the comment near struct
258 * cyc2ns_data. 257 * cyc2ns_data.
259 */ 258 */
260 data->cyc2ns_mul = 259 clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
261 DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, 260 NSEC_PER_MSEC, 0);
262 cpu_khz); 261
263 data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; 262 /*
263 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
264 * not expected to be greater than 31 due to the original published
265 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
266 * value) - refer perf_event_mmap_page documentation in perf_event.h.
267 */
268 if (data->cyc2ns_shift == 32) {
269 data->cyc2ns_shift = 31;
270 data->cyc2ns_mul >>= 1;
271 }
272
264 data->cyc2ns_offset = ns_now - 273 data->cyc2ns_offset = ns_now -
265 mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); 274 mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
266 275
267 cyc2ns_write_end(cpu, data); 276 cyc2ns_write_end(cpu, data);
268 277
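
The tsc.c change above stops hard-coding a 10-bit shift and instead lets clocks_calc_mult_shift() pick a (mult, shift) pair, then caps the shift at 31 because the pair is exported through the perf mmap page, whose originally published conversion shifted a 32-bit value. A standalone sketch of the same idea, assuming a compiler with unsigned __int128: derive mult/shift such that ns = cycles * mult >> shift for a given CPU frequency in kHz, keep the shift at or below 31, and convert with a wide multiply (mul_u64_u32_shr() is the kernel equivalent). calc_cyc2ns() and cycles_to_ns() are illustrative helpers, not kernel API.

#include <stdio.h>
#include <stdint.h>

/*
 * ns per cycle = 10^6 / cpu_khz, so we want mult / 2^shift ~= 10^6 / cpu_khz.
 * Start from the largest shift the perf mmap page tolerates (31) and shrink
 * it until mult fits in 32 bits. Assumes cpu_khz > 0.
 */
static void calc_cyc2ns(uint64_t cpu_khz, uint32_t *mult, uint32_t *shift)
{
	uint32_t sh = 31;
	uint64_t m = ((1000000ULL << sh) + cpu_khz / 2) / cpu_khz;

	while (m > 0xffffffffULL) {
		sh--;
		m = ((1000000ULL << sh) + cpu_khz / 2) / cpu_khz;
	}
	*mult = (uint32_t)m;
	*shift = sh;
}

/* 64x32 multiply with a wide intermediate, like the kernel's mul_u64_u32_shr(). */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (uint64_t)(((unsigned __int128)cycles * mult) >> shift);
}

int main(void)
{
	uint32_t mult, shift;
	uint64_t khz = 2500000;		/* a 2.5 GHz TSC */

	calc_cyc2ns(khz, &mult, &shift);
	printf("mult=%u shift=%u\n", mult, shift);
	printf("10^9 cycles -> %llu ns (expect ~400000000)\n",
	       (unsigned long long)cycles_to_ns(1000000000ULL, mult, shift));
	return 0;
}
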
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 816488c0b97e..d388de72eaca 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -353,8 +353,12 @@ AVXcode: 1
35317: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) 35317: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
35418: Grp16 (1A) 35418: Grp16 (1A)
35519: 35519:
3561a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv 356# Intel SDM opcode map does not list MPX instructions. For now using Gv for
3571b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv 357# bnd registers and Ev for everything else is OK because the instruction
358# decoder does not use the information except as an indication that there is
359# a ModR/M byte.
3601a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
3611b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
3581c: 3621c:
3591d: 3631d:
3601e: 3641e:
@@ -732,6 +736,12 @@ bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
732be: vfnmsub231ps/d Vx,Hx,Wx (66),(v) 736be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
733bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1) 737bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
734# 0x0f 0x38 0xc0-0xff 738# 0x0f 0x38 0xc0-0xff
739c8: sha1nexte Vdq,Wdq
740c9: sha1msg1 Vdq,Wdq
741ca: sha1msg2 Vdq,Wdq
742cb: sha256rnds2 Vdq,Wdq
743cc: sha256msg1 Vdq,Wdq
744cd: sha256msg2 Vdq,Wdq
735db: VAESIMC Vdq,Wdq (66),(v1) 745db: VAESIMC Vdq,Wdq (66),(v1)
736dc: VAESENC Vdq,Hdq,Wdq (66),(v1) 746dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
737dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) 747dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
@@ -790,6 +800,7 @@ AVXcode: 3
79061: vpcmpestri Vdq,Wdq,Ib (66),(v1) 80061: vpcmpestri Vdq,Wdq,Ib (66),(v1)
79162: vpcmpistrm Vdq,Wdq,Ib (66),(v1) 80162: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
79263: vpcmpistri Vdq,Wdq,Ib (66),(v1) 80263: vpcmpistri Vdq,Wdq,Ib (66),(v1)
803cc: sha1rnds4 Vdq,Wdq,Ib
793df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1) 804df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
794f0: RORX Gy,Ey,Ib (F2),(v) 805f0: RORX Gy,Ey,Ib (F2),(v)
795EndTable 806EndTable
@@ -874,7 +885,7 @@ GrpTable: Grp7
8742: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) 8852: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
8753: LIDT Ms 8863: LIDT Ms
8764: SMSW Mw/Rv 8874: SMSW Mw/Rv
8775: 8885: rdpkru (110),(11B) | wrpkru (111),(11B)
8786: LMSW Ew 8896: LMSW Ew
8797: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B) 8907: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
880EndTable 891EndTable
@@ -888,6 +899,9 @@ EndTable
888 899
889GrpTable: Grp9 900GrpTable: Grp9
8901: CMPXCHG8B/16B Mq/Mdq 9011: CMPXCHG8B/16B Mq/Mdq
9023: xrstors
9034: xsavec
9045: xsaves
8916: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) 9056: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
8927: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B) 9067: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
893EndTable 907EndTable
@@ -932,8 +946,8 @@ GrpTable: Grp15
9323: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) 9463: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
9334: XSAVE 9474: XSAVE
9345: XRSTOR | lfence (11B) 9485: XRSTOR | lfence (11B)
9356: XSAVEOPT | mfence (11B) 9496: XSAVEOPT | clwb (66) | mfence (11B)
9367: clflush | sfence (11B) 9507: clflush | clflushopt (66) | sfence (11B) | pcommit (66),(11B)
937EndTable 951EndTable
938 952
939GrpTable: Grp16 953GrpTable: Grp16
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1d8a83df153a..1f37cb2b56a9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -693,14 +693,12 @@ void free_initmem(void)
693#ifdef CONFIG_BLK_DEV_INITRD 693#ifdef CONFIG_BLK_DEV_INITRD
694void __init free_initrd_mem(unsigned long start, unsigned long end) 694void __init free_initrd_mem(unsigned long start, unsigned long end)
695{ 695{
696#ifdef CONFIG_MICROCODE_EARLY
697 /* 696 /*
698 * Remember, initrd memory may contain microcode or other useful things. 697 * Remember, initrd memory may contain microcode or other useful things.
699 * Before we lose initrd mem, we need to find a place to hold them 698 * Before we lose initrd mem, we need to find a place to hold them
700 * now that normal virtual memory is enabled. 699 * now that normal virtual memory is enabled.
701 */ 700 */
702 save_microcode_in_initrd(); 701 save_microcode_in_initrd();
703#endif
704 702
705 /* 703 /*
706 * end could be not aligned, and We can not align that, 704 * end could be not aligned, and We can not align that,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2c44c0792301..050a092b8d9a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -647,9 +647,12 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
647 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) 647 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
648 set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot))); 648 set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
649 649
650 if (pfn_range_is_mapped(PFN_DOWN(__pa(address)), 650 if (virt_addr_valid(address)) {
651 PFN_DOWN(__pa(address)) + 1)) 651 unsigned long pfn = PFN_DOWN(__pa(address));
652 split_page_count(level); 652
653 if (pfn_range_is_mapped(pfn, pfn + 1))
654 split_page_count(level);
655 }
653 656
654 /* 657 /*
655 * Install the new, split up pagetable. 658 * Install the new, split up pagetable.
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index d7f997f7c26d..ea48449b2e63 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -50,11 +50,16 @@ void __init efi_bgrt_init(void)
50 bgrt_tab->version); 50 bgrt_tab->version);
51 return; 51 return;
52 } 52 }
53 if (bgrt_tab->status != 1) { 53 if (bgrt_tab->status & 0xfe) {
54 pr_err("Ignoring BGRT: invalid status %u (expected 1)\n", 54 pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
55 bgrt_tab->status); 55 bgrt_tab->status);
56 return; 56 return;
57 } 57 }
58 if (bgrt_tab->status != 1) {
59 pr_debug("Ignoring BGRT: invalid status %u (expected 1)\n",
60 bgrt_tab->status);
61 return;
62 }
58 if (bgrt_tab->image_type != 0) { 63 if (bgrt_tab->image_type != 0) {
59 pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n", 64 pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
60 bgrt_tab->image_type); 65 bgrt_tab->image_type);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 6a28ded74211..ad285404ea7f 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -194,7 +194,7 @@ static void __init do_add_efi_memmap(void)
194int __init efi_memblock_x86_reserve_range(void) 194int __init efi_memblock_x86_reserve_range(void)
195{ 195{
196 struct efi_info *e = &boot_params.efi_info; 196 struct efi_info *e = &boot_params.efi_info;
197 unsigned long pmap; 197 phys_addr_t pmap;
198 198
199 if (efi_enabled(EFI_PARAVIRT)) 199 if (efi_enabled(EFI_PARAVIRT))
200 return 0; 200 return 0;
@@ -209,7 +209,7 @@ int __init efi_memblock_x86_reserve_range(void)
209#else 209#else
210 pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32)); 210 pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
211#endif 211#endif
212 memmap.phys_map = (void *)pmap; 212 memmap.phys_map = pmap;
213 memmap.nr_map = e->efi_memmap_size / 213 memmap.nr_map = e->efi_memmap_size /
214 e->efi_memdesc_size; 214 e->efi_memdesc_size;
215 memmap.desc_size = e->efi_memdesc_size; 215 memmap.desc_size = e->efi_memdesc_size;
@@ -222,7 +222,7 @@ int __init efi_memblock_x86_reserve_range(void)
222 return 0; 222 return 0;
223} 223}
224 224
225static void __init print_efi_memmap(void) 225void __init efi_print_memmap(void)
226{ 226{
227#ifdef EFI_DEBUG 227#ifdef EFI_DEBUG
228 efi_memory_desc_t *md; 228 efi_memory_desc_t *md;
@@ -524,7 +524,7 @@ void __init efi_init(void)
524 return; 524 return;
525 525
526 if (efi_enabled(EFI_DBG)) 526 if (efi_enabled(EFI_DBG))
527 print_efi_memmap(); 527 efi_print_memmap();
528 528
529 efi_esrt_init(); 529 efi_esrt_init();
530} 530}
@@ -1017,24 +1017,6 @@ u32 efi_mem_type(unsigned long phys_addr)
1017 return 0; 1017 return 0;
1018} 1018}
1019 1019
1020u64 efi_mem_attributes(unsigned long phys_addr)
1021{
1022 efi_memory_desc_t *md;
1023 void *p;
1024
1025 if (!efi_enabled(EFI_MEMMAP))
1026 return 0;
1027
1028 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
1029 md = p;
1030 if ((md->phys_addr <= phys_addr) &&
1031 (phys_addr < (md->phys_addr +
1032 (md->num_pages << EFI_PAGE_SHIFT))))
1033 return md->attribute;
1034 }
1035 return 0;
1036}
1037
1038static int __init arch_parse_efi_cmdline(char *str) 1020static int __init arch_parse_efi_cmdline(char *str)
1039{ 1021{
1040 if (!str) { 1022 if (!str) {
@@ -1044,8 +1026,6 @@ static int __init arch_parse_efi_cmdline(char *str)
1044 1026
1045 if (parse_option_str(str, "old_map")) 1027 if (parse_option_str(str, "old_map"))
1046 set_bit(EFI_OLD_MEMMAP, &efi.flags); 1028 set_bit(EFI_OLD_MEMMAP, &efi.flags);
1047 if (parse_option_str(str, "debug"))
1048 set_bit(EFI_DBG, &efi.flags);
1049 1029
1050 return 0; 1030 return 0;
1051} 1031}
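
The efi_mem_attributes() body removed above (it moved into the generic EFI code) is a plain containment walk over the memory map: find the descriptor whose [phys_addr, phys_addr + num_pages * 4 KiB) range covers the address and return its attribute bits. A self-contained sketch of that walk over a mock descriptor array; struct mem_desc and mem_attributes() are simplified stand-ins, not the real efi_memory_desc_t or EFI API.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define EFI_PAGE_SHIFT	12		/* EFI pages are 4 KiB */

struct mem_desc {			/* simplified efi_memory_desc_t stand-in */
	uint64_t phys_addr;
	uint64_t num_pages;
	uint64_t attribute;
};

static uint64_t mem_attributes(const struct mem_desc *map, size_t n, uint64_t phys)
{
	size_t i;

	for (i = 0; i < n; i++) {
		const struct mem_desc *md = &map[i];
		uint64_t end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (md->phys_addr <= phys && phys < end)
			return md->attribute;
	}
	return 0;			/* no descriptor covers the address */
}

int main(void)
{
	const struct mem_desc map[] = {
		{ 0x00000000, 256,  0x1 },	/* 1 MiB low region */
		{ 0x00100000, 4096, 0x8 },	/* 16 MiB region with some attribute */
	};

	printf("attr(0x180000) = %#llx\n",
	       (unsigned long long)mem_attributes(map, 2, 0x180000));
	return 0;
}
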
diff --git a/arch/x86/ras/Kconfig b/arch/x86/ras/Kconfig
index 10fea5fc821e..df280da34825 100644
--- a/arch/x86/ras/Kconfig
+++ b/arch/x86/ras/Kconfig
@@ -1,11 +1,9 @@
1config AMD_MCE_INJ 1config AMD_MCE_INJ
2 tristate "Simple MCE injection interface for AMD processors" 2 tristate "Simple MCE injection interface for AMD processors"
3 depends on RAS && EDAC_DECODE_MCE && DEBUG_FS 3 depends on RAS && EDAC_DECODE_MCE && DEBUG_FS && AMD_NB
4 default n 4 default n
5 help 5 help
6 This is a simple debugfs interface to inject MCEs and test different 6 This is a simple debugfs interface to inject MCEs and test different
7 aspects of the MCE handling code. 7 aspects of the MCE handling code.
8 8
9 WARNING: Do not even assume this interface is staying stable! 9 WARNING: Do not even assume this interface is staying stable!
10
11
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index 17e35b5bf779..55d38cfa46c2 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -17,7 +17,11 @@
17#include <linux/cpu.h> 17#include <linux/cpu.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/uaccess.h> 19#include <linux/uaccess.h>
20#include <linux/pci.h>
21
20#include <asm/mce.h> 22#include <asm/mce.h>
23#include <asm/amd_nb.h>
24#include <asm/irq_vectors.h>
21 25
22#include "../kernel/cpu/mcheck/mce-internal.h" 26#include "../kernel/cpu/mcheck/mce-internal.h"
23 27
@@ -30,16 +34,21 @@ static struct dentry *dfs_inj;
30static u8 n_banks; 34static u8 n_banks;
31 35
32#define MAX_FLAG_OPT_SIZE 3 36#define MAX_FLAG_OPT_SIZE 3
37#define NBCFG 0x44
33 38
34enum injection_type { 39enum injection_type {
35 SW_INJ = 0, /* SW injection, simply decode the error */ 40 SW_INJ = 0, /* SW injection, simply decode the error */
36 HW_INJ, /* Trigger a #MC */ 41 HW_INJ, /* Trigger a #MC */
42 DFR_INT_INJ, /* Trigger Deferred error interrupt */
43 THR_INT_INJ, /* Trigger threshold interrupt */
37 N_INJ_TYPES, 44 N_INJ_TYPES,
38}; 45};
39 46
40static const char * const flags_options[] = { 47static const char * const flags_options[] = {
41 [SW_INJ] = "sw", 48 [SW_INJ] = "sw",
42 [HW_INJ] = "hw", 49 [HW_INJ] = "hw",
50 [DFR_INT_INJ] = "df",
51 [THR_INT_INJ] = "th",
43 NULL 52 NULL
44}; 53};
45 54
@@ -129,12 +138,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf,
129{ 138{
130 char buf[MAX_FLAG_OPT_SIZE], *__buf; 139 char buf[MAX_FLAG_OPT_SIZE], *__buf;
131 int err; 140 int err;
132 size_t ret;
133 141
134 if (cnt > MAX_FLAG_OPT_SIZE) 142 if (cnt > MAX_FLAG_OPT_SIZE)
135 cnt = MAX_FLAG_OPT_SIZE; 143 return -EINVAL;
136
137 ret = cnt;
138 144
139 if (copy_from_user(&buf, ubuf, cnt)) 145 if (copy_from_user(&buf, ubuf, cnt))
140 return -EFAULT; 146 return -EFAULT;
@@ -150,9 +156,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf,
150 return err; 156 return err;
151 } 157 }
152 158
153 *ppos += ret; 159 *ppos += cnt;
154 160
155 return ret; 161 return cnt;
156} 162}
157 163
158static const struct file_operations flags_fops = { 164static const struct file_operations flags_fops = {
@@ -185,6 +191,55 @@ static void trigger_mce(void *info)
185 asm volatile("int $18"); 191 asm volatile("int $18");
186} 192}
187 193
194static void trigger_dfr_int(void *info)
195{
196 asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
197}
198
199static void trigger_thr_int(void *info)
200{
201 asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
202}
203
204static u32 get_nbc_for_node(int node_id)
205{
206 struct cpuinfo_x86 *c = &boot_cpu_data;
207 u32 cores_per_node;
208
209 cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket();
210
211 return cores_per_node * node_id;
212}
213
214static void toggle_nb_mca_mst_cpu(u16 nid)
215{
216 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
217 u32 val;
218 int err;
219
220 if (!F3)
221 return;
222
223 err = pci_read_config_dword(F3, NBCFG, &val);
224 if (err) {
225 pr_err("%s: Error reading F%dx%03x.\n",
226 __func__, PCI_FUNC(F3->devfn), NBCFG);
227 return;
228 }
229
230 if (val & BIT(27))
231 return;
232
233 pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
234 __func__);
235
236 val |= BIT(27);
237 err = pci_write_config_dword(F3, NBCFG, val);
238 if (err)
239 pr_err("%s: Error writing F%dx%03x.\n",
240 __func__, PCI_FUNC(F3->devfn), NBCFG);
241}
242
188static void do_inject(void) 243static void do_inject(void)
189{ 244{
190 u64 mcg_status = 0; 245 u64 mcg_status = 0;
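
The helpers added above encode two small steps: get_nbc_for_node() picks the "node base core" that logs bank 4 errors on multi-node parts (package core count divided by nodes per socket, times the node id), and toggle_nb_mca_mst_cpu() does a read-modify-write of bit 27 (NbMcaToMstCpuEn) in D18F3x44 if BIOS left it clear. A tiny sketch of both, with the PCI access replaced by a mock register; nbc_for_node() and enable_nb_mca_to_mst_cpu() are illustrative names.

#include <stdio.h>
#include <stdint.h>

static unsigned int nbc_for_node(unsigned int cores_per_socket,
				 unsigned int nodes_per_socket,
				 unsigned int node_id)
{
	unsigned int cores_per_node = cores_per_socket / nodes_per_socket;

	return cores_per_node * node_id;	/* first core of that node */
}

static void enable_nb_mca_to_mst_cpu(uint32_t *nbcfg)
{
	if (*nbcfg & (1u << 27))		/* bit already set by BIOS: nothing to do */
		return;
	*nbcfg |= 1u << 27;			/* read-modify-write, as the hunk does over PCI */
}

int main(void)
{
	uint32_t nbcfg = 0;

	enable_nb_mca_to_mst_cpu(&nbcfg);
	printf("node base core of node 1 on a 16-core, 2-node socket: %u\n",
	       nbc_for_node(16, 2, 1));		/* prints 8 */
	printf("NBCFG bit 27 now %s\n", (nbcfg >> 27) & 1 ? "set" : "clear");
	return 0;
}
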
@@ -205,6 +260,26 @@ static void do_inject(void)
205 if (!(i_mce.status & MCI_STATUS_PCC)) 260 if (!(i_mce.status & MCI_STATUS_PCC))
206 mcg_status |= MCG_STATUS_RIPV; 261 mcg_status |= MCG_STATUS_RIPV;
207 262
263 /*
264 * Ensure necessary status bits for deferred errors:
265 * - MCx_STATUS[Deferred]: make sure it is a deferred error
266 * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
267 */
268 if (inj_type == DFR_INT_INJ) {
269 i_mce.status |= MCI_STATUS_DEFERRED;
270 i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
271 }
272
273 /*
274 * For multi node CPUs, logging and reporting of bank 4 errors happens
275 * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
276 * Fam10h and later BKDGs.
277 */
278 if (static_cpu_has(X86_FEATURE_AMD_DCM) && b == 4) {
279 toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
280 cpu = get_nbc_for_node(amd_get_nb_id(cpu));
281 }
282
208 get_online_cpus(); 283 get_online_cpus();
209 if (!cpu_online(cpu)) 284 if (!cpu_online(cpu))
210 goto err; 285 goto err;
@@ -225,7 +300,16 @@ static void do_inject(void)
225 300
226 toggle_hw_mce_inject(cpu, false); 301 toggle_hw_mce_inject(cpu, false);
227 302
228 smp_call_function_single(cpu, trigger_mce, NULL, 0); 303 switch (inj_type) {
304 case DFR_INT_INJ:
305 smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
306 break;
307 case THR_INT_INJ:
308 smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
309 break;
310 default:
311 smp_call_function_single(cpu, trigger_mce, NULL, 0);
312 }
229 313
230err: 314err:
231 put_online_cpus(); 315 put_online_cpus();
@@ -290,6 +374,11 @@ static const char readme_msg[] =
290"\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n" 374"\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
291"\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" 375"\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
292"\t before injecting.\n" 376"\t before injecting.\n"
377"\t - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
378"\t error APIC interrupt handler to handle the error if the feature is \n"
379"\t is present in hardware. \n"
380"\t - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
381"\t APIC interrupt handler to handle the error. \n"
293"\n"; 382"\n";
294 383
295static ssize_t 384static ssize_t
diff --git a/arch/x86/um/asm/syscall.h b/arch/x86/um/asm/syscall.h
index 9fe77b7b5a0e..81d6562ce01d 100644
--- a/arch/x86/um/asm/syscall.h
+++ b/arch/x86/um/asm/syscall.h
@@ -3,6 +3,10 @@
3 3
4#include <uapi/linux/audit.h> 4#include <uapi/linux/audit.h>
5 5
6typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
7 unsigned long, unsigned long,
8 unsigned long, unsigned long);
9
6static inline int syscall_get_arch(void) 10static inline int syscall_get_arch(void)
7{ 11{
8#ifdef CONFIG_X86_32 12#ifdef CONFIG_X86_32
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 9701a4fd7bf2..836a1eb5df43 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -12,7 +12,10 @@
12#include <skas.h> 12#include <skas.h>
13#include <sysdep/tls.h> 13#include <sysdep/tls.h>
14 14
15extern int modify_ldt(int func, void *ptr, unsigned long bytecount); 15static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
16{
17 return syscall(__NR_modify_ldt, func, ptr, bytecount);
18}
16 19
17static long write_ldt_entry(struct mm_id *mm_idp, int func, 20static long write_ldt_entry(struct mm_id *mm_idp, int func,
18 struct user_desc *desc, void **addr, int done) 21 struct user_desc *desc, void **addr, int done)
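
The um/ldt.c hunk replaces an extern declaration with a small static wrapper that issues the system call directly through syscall(2). The same pattern works for any call where going through the call number is preferable to relying on a libc wrapper; a minimal example, using gettid here only because it is easy to demonstrate (my_gettid() is an illustrative name).

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static inline long my_gettid(void)
{
	return syscall(SYS_gettid);	/* issue the call number directly */
}

int main(void)
{
	printf("tid = %ld\n", my_gettid());
	return 0;
}
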
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index bd16d6c370ec..439c0994b696 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -7,6 +7,7 @@
7#include <linux/sys.h> 7#include <linux/sys.h>
8#include <linux/cache.h> 8#include <linux/cache.h>
9#include <generated/user_constants.h> 9#include <generated/user_constants.h>
10#include <asm/syscall.h>
10 11
11#define __NO_STUBS 12#define __NO_STUBS
12 13
@@ -24,15 +25,13 @@
24 25
25#define old_mmap sys_old_mmap 26#define old_mmap sys_old_mmap
26 27
27#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ; 28#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
28#include <asm/syscalls_32.h> 29#include <asm/syscalls_32.h>
29 30
30#undef __SYSCALL_I386 31#undef __SYSCALL_I386
31#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, 32#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
32 33
33typedef asmlinkage void (*sys_call_ptr_t)(void); 34extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
34
35extern asmlinkage void sys_ni_syscall(void);
36 35
37const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { 36const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
38 /* 37 /*
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index a75d8700472a..b74ea6c2c0e7 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -7,6 +7,7 @@
7#include <linux/sys.h> 7#include <linux/sys.h>
8#include <linux/cache.h> 8#include <linux/cache.h>
9#include <generated/user_constants.h> 9#include <generated/user_constants.h>
10#include <asm/syscall.h>
10 11
11#define __NO_STUBS 12#define __NO_STUBS
12 13
@@ -37,15 +38,13 @@
37#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) 38#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
38#define __SYSCALL_X32(nr, sym, compat) /* Not supported */ 39#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
39 40
40#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; 41#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
41#include <asm/syscalls_64.h> 42#include <asm/syscalls_64.h>
42 43
43#undef __SYSCALL_64 44#undef __SYSCALL_64
44#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym, 45#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym,
45 46
46typedef void (*sys_call_ptr_t)(void); 47extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
47
48extern void sys_ni_syscall(void);
49 48
50const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { 49const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
51 /* 50 /*
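
Both sys_call_table patches above give every table entry one uniform prototype, a function of six unsigned long arguments returning long, instead of the old void (*)(void) declarations, so calls through the table go via a matching function type. A compact sketch of such a uniformly typed dispatch table, using the same [nr] = sym designated-initializer style; the toy entries and numbers are illustrative.

#include <stdio.h>

typedef long (*sys_call_ptr_t)(unsigned long, unsigned long, unsigned long,
			       unsigned long, unsigned long, unsigned long);

static long sys_ni_syscall(unsigned long a, unsigned long b, unsigned long c,
			   unsigned long d, unsigned long e, unsigned long f)
{
	(void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
	return -38;				/* -ENOSYS */
}

static long sys_add(unsigned long a, unsigned long b, unsigned long c,
		    unsigned long d, unsigned long e, unsigned long f)
{
	(void)c; (void)d; (void)e; (void)f;
	return (long)(a + b);
}

static const sys_call_ptr_t table[] = {
	[0] = sys_ni_syscall,
	[1] = sys_add,				/* toy entries, not real syscall numbers */
};

int main(void)
{
	printf("%ld %ld\n", table[1](2, 3, 0, 0, 0, 0), table[0](0, 0, 0, 0, 0, 0));
	return 0;
}
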
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1c30e4ab1022..63320b6d35bc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -965,17 +965,8 @@ char * __init xen_auto_xlated_memory_setup(void)
965static void __init fiddle_vdso(void) 965static void __init fiddle_vdso(void)
966{ 966{
967#ifdef CONFIG_X86_32 967#ifdef CONFIG_X86_32
968 /* 968 u32 *mask = vdso_image_32.data +
969 * This could be called before selected_vdso32 is initialized, so 969 vdso_image_32.sym_VDSO32_NOTE_MASK;
970 * just fiddle with both possible images. vdso_image_32_syscall
971 * can't be selected, since it only exists on 64-bit systems.
972 */
973 u32 *mask;
974 mask = vdso_image_32_int80.data +
975 vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
976 *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
977 mask = vdso_image_32_sysenter.data +
978 vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
979 *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT; 970 *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
980#endif 971#endif
981} 972}
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 93795d047303..fd8017ce298a 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -47,7 +47,7 @@
47 * 47 *
48 * Atomically reads the value of @v. 48 * Atomically reads the value of @v.
49 */ 49 */
50#define atomic_read(v) ACCESS_ONCE((v)->counter) 50#define atomic_read(v) READ_ONCE((v)->counter)
51 51
52/** 52/**
53 * atomic_set - set atomic variable 53 * atomic_set - set atomic variable
@@ -56,7 +56,7 @@
56 * 56 *
57 * Atomically sets the value of @v to @i. 57 * Atomically sets the value of @v to @i.
58 */ 58 */
59#define atomic_set(v,i) ((v)->counter = (i)) 59#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
60 60
61#if XCHAL_HAVE_S32C1I 61#if XCHAL_HAVE_S32C1I
62#define ATOMIC_OP(op) \ 62#define ATOMIC_OP(op) \
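
The xtensa change above is part of the tree-wide move from ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE(); for word-sized objects both boil down to forcing exactly one access through a volatile-qualified lvalue so the compiler cannot merge, re-fetch or elide it. A minimal userspace rendition of the idea, assuming the GCC/Clang __typeof__ extension; the real kernel macros also handle accesses wider than a machine word, which this sketch does not.

#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

static int counter;

int main(void)
{
	WRITE_ONCE(counter, 42);		/* the compiler must emit exactly one store */
	printf("%d\n", READ_ONCE(counter));	/* and exactly one load here */
	return 0;
}
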
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773..18e92a6645e2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -576,7 +576,7 @@ void blk_cleanup_queue(struct request_queue *q)
576 q->queue_lock = &q->__queue_lock; 576 q->queue_lock = &q->__queue_lock;
577 spin_unlock_irq(lock); 577 spin_unlock_irq(lock);
578 578
579 bdi_destroy(&q->backing_dev_info); 579 bdi_unregister(&q->backing_dev_info);
580 580
581 /* @q is and will stay empty, shutdown and put */ 581 /* @q is and will stay empty, shutdown and put */
582 blk_put_queue(q); 582 blk_put_queue(q);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index bd40292e5009..9ebf65379556 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -26,13 +26,6 @@ static void bio_batch_end_io(struct bio *bio)
26 bio_put(bio); 26 bio_put(bio);
27} 27}
28 28
29/*
30 * Ensure that max discard sectors doesn't overflow bi_size and hopefully
31 * it is of the proper granularity as long as the granularity is a power
32 * of two.
33 */
34#define MAX_BIO_SECTORS ((1U << 31) >> 9)
35
36/** 29/**
37 * blkdev_issue_discard - queue a discard 30 * blkdev_issue_discard - queue a discard
38 * @bdev: blockdev to issue discard for 31 * @bdev: blockdev to issue discard for
@@ -50,6 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
50 DECLARE_COMPLETION_ONSTACK(wait); 43 DECLARE_COMPLETION_ONSTACK(wait);
51 struct request_queue *q = bdev_get_queue(bdev); 44 struct request_queue *q = bdev_get_queue(bdev);
52 int type = REQ_WRITE | REQ_DISCARD; 45 int type = REQ_WRITE | REQ_DISCARD;
46 unsigned int granularity;
47 int alignment;
53 struct bio_batch bb; 48 struct bio_batch bb;
54 struct bio *bio; 49 struct bio *bio;
55 int ret = 0; 50 int ret = 0;
@@ -61,6 +56,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
61 if (!blk_queue_discard(q)) 56 if (!blk_queue_discard(q))
62 return -EOPNOTSUPP; 57 return -EOPNOTSUPP;
63 58
59 /* Zero-sector (unknown) and one-sector granularities are the same. */
60 granularity = max(q->limits.discard_granularity >> 9, 1U);
61 alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
62
64 if (flags & BLKDEV_DISCARD_SECURE) { 63 if (flags & BLKDEV_DISCARD_SECURE) {
65 if (!blk_queue_secdiscard(q)) 64 if (!blk_queue_secdiscard(q))
66 return -EOPNOTSUPP; 65 return -EOPNOTSUPP;
@@ -74,7 +73,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
74 blk_start_plug(&plug); 73 blk_start_plug(&plug);
75 while (nr_sects) { 74 while (nr_sects) {
76 unsigned int req_sects; 75 unsigned int req_sects;
77 sector_t end_sect; 76 sector_t end_sect, tmp;
78 77
79 bio = bio_alloc(gfp_mask, 1); 78 bio = bio_alloc(gfp_mask, 1);
80 if (!bio) { 79 if (!bio) {
@@ -82,8 +81,22 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
82 break; 81 break;
83 } 82 }
84 83
85 req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS); 84 /* Make sure bi_size doesn't overflow */
85 req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
86
87 /*
88 * If splitting a request, and the next starting sector would be
89 * misaligned, stop the discard at the previous aligned sector.
90 */
86 end_sect = sector + req_sects; 91 end_sect = sector + req_sects;
92 tmp = end_sect;
93 if (req_sects < nr_sects &&
94 sector_div(tmp, granularity) != alignment) {
95 end_sect = end_sect - alignment;
96 sector_div(end_sect, granularity);
97 end_sect = end_sect * granularity + alignment;
98 req_sects = end_sect - sector;
99 }
87 100
88 bio->bi_iter.bi_sector = sector; 101 bio->bi_iter.bi_sector = sector;
89 bio->bi_end_io = bio_batch_end_io; 102 bio->bi_end_io = bio_batch_end_io;
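
The blkdev_issue_discard() change above does two things: it caps each bio at UINT_MAX >> 9 sectors so bi_size cannot overflow, and, when a request has to be split, it trims the split point so the next chunk starts on a boundary of the device's discard granularity offset by its alignment. A standalone sketch of that trimming arithmetic; sector_div() on these values reduces to plain modulus and division, and trim_to_granularity() is an illustrative helper name.

#include <stdio.h>
#include <stdint.h>

/*
 * Trim req_sects so that (sector + req_sects) lands on a boundary of the
 * form n * granularity + alignment, mirroring the hunk above.
 */
static uint64_t trim_to_granularity(uint64_t sector, uint64_t req_sects,
				    uint64_t nr_sects, uint64_t granularity,
				    uint64_t alignment)
{
	uint64_t end_sect = sector + req_sects;

	if (req_sects < nr_sects && end_sect % granularity != alignment) {
		end_sect = ((end_sect - alignment) / granularity) * granularity + alignment;
		req_sects = end_sect - sector;
	}
	return req_sects;
}

int main(void)
{
	/* 8-sector granularity, alignment offset 2, splitting a larger discard */
	uint64_t req = trim_to_granularity(100, 37, 1000, 8, 2);

	printf("req_sects = %llu, next start = %llu (boundary: 8*n + 2)\n",
	       (unsigned long long)req, (unsigned long long)(100 + req));
	return 0;
}
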
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index ed96474d75cb..ec2d11915142 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -641,6 +641,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
641{ 641{
642 bt_free(&tags->bitmap_tags); 642 bt_free(&tags->bitmap_tags);
643 bt_free(&tags->breserved_tags); 643 bt_free(&tags->breserved_tags);
644 free_cpumask_var(tags->cpumask);
644 kfree(tags); 645 kfree(tags);
645} 646}
646 647
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7785ae96267a..85f014327342 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2296,10 +2296,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2296 int i; 2296 int i;
2297 2297
2298 for (i = 0; i < set->nr_hw_queues; i++) { 2298 for (i = 0; i < set->nr_hw_queues; i++) {
2299 if (set->tags[i]) { 2299 if (set->tags[i])
2300 blk_mq_free_rq_map(set, set->tags[i], i); 2300 blk_mq_free_rq_map(set, set->tags[i], i);
2301 free_cpumask_var(set->tags[i]->cpumask);
2302 }
2303 } 2301 }
2304 2302
2305 kfree(set->tags); 2303 kfree(set->tags);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3e44a9da2a13..07b42f5ad797 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj)
540 struct request_queue *q = 540 struct request_queue *q =
541 container_of(kobj, struct request_queue, kobj); 541 container_of(kobj, struct request_queue, kobj);
542 542
543 bdi_exit(&q->backing_dev_info);
543 blkcg_exit_queue(q); 544 blkcg_exit_queue(q);
544 545
545 if (q->elevator) { 546 if (q->elevator) {
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index b788f169cc98..b4ffc5be1a93 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -706,7 +706,7 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
706err: 706err:
707 if (err != -EAGAIN) 707 if (err != -EAGAIN)
708 break; 708 break;
709 if (signal_pending(current)) { 709 if (fatal_signal_pending(current)) {
710 err = -EINTR; 710 err = -EINTR;
711 break; 711 break;
712 } 712 }
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d130b41dbaea..59bf491fe3d8 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -345,7 +345,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
345 crypto_alg_tested(larval->alg.cra_driver_name, 0); 345 crypto_alg_tested(larval->alg.cra_driver_name, 0);
346 } 346 }
347 347
348 err = wait_for_completion_interruptible(&larval->completion); 348 err = wait_for_completion_killable(&larval->completion);
349 WARN_ON(err); 349 WARN_ON(err);
350 350
351out: 351out:
diff --git a/crypto/api.c b/crypto/api.c
index afe4610afc4b..bbc147cb5dec 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -172,7 +172,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
172 struct crypto_larval *larval = (void *)alg; 172 struct crypto_larval *larval = (void *)alg;
173 long timeout; 173 long timeout;
174 174
175 timeout = wait_for_completion_interruptible_timeout( 175 timeout = wait_for_completion_killable_timeout(
176 &larval->completion, 60 * HZ); 176 &larval->completion, 60 * HZ);
177 177
178 alg = larval->adult; 178 alg = larval->adult;
@@ -445,7 +445,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
445err: 445err:
446 if (err != -EAGAIN) 446 if (err != -EAGAIN)
447 break; 447 break;
448 if (signal_pending(current)) { 448 if (fatal_signal_pending(current)) {
449 err = -EINTR; 449 err = -EINTR;
450 break; 450 break;
451 } 451 }
@@ -562,7 +562,7 @@ void *crypto_alloc_tfm(const char *alg_name,
562err: 562err:
563 if (err != -EAGAIN) 563 if (err != -EAGAIN)
564 break; 564 break;
565 if (signal_pending(current)) { 565 if (fatal_signal_pending(current)) {
566 err = -EINTR; 566 err = -EINTR;
567 break; 567 break;
568 } 568 }
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index d94d99ffe8b9..237f3795cfaa 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -375,7 +375,7 @@ static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
375 err = PTR_ERR(alg); 375 err = PTR_ERR(alg);
376 if (err != -EAGAIN) 376 if (err != -EAGAIN)
377 break; 377 break;
378 if (signal_pending(current)) { 378 if (fatal_signal_pending(current)) {
379 err = -EINTR; 379 err = -EINTR;
380 break; 380 break;
381 } 381 }
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 23981ac1c6c2..3dd9c462d22a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -157,11 +157,15 @@ static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
157 157
158static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) 158static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
159{ 159{
160 unsigned long vaddr; 160 unsigned long vaddr, paddr;
161 pgprot_t prot;
161 162
162 vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); 163 vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
163 ioremap_page_range(vaddr, vaddr + PAGE_SIZE, 164
164 pfn << PAGE_SHIFT, PAGE_KERNEL); 165 paddr = pfn << PAGE_SHIFT;
166 prot = arch_apei_get_mem_attribute(paddr);
167
168 ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
165 169
166 return (void __iomem *)vaddr; 170 return (void __iomem *)vaddr;
167} 171}
diff --git a/drivers/acpi/gsi.c b/drivers/acpi/gsi.c
index 38208f2d0e69..fa4585a6914e 100644
--- a/drivers/acpi/gsi.c
+++ b/drivers/acpi/gsi.c
@@ -11,9 +11,12 @@
11#include <linux/acpi.h> 11#include <linux/acpi.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/irqdomain.h> 13#include <linux/irqdomain.h>
14#include <linux/of.h>
14 15
15enum acpi_irq_model_id acpi_irq_model; 16enum acpi_irq_model_id acpi_irq_model;
16 17
18static struct fwnode_handle *acpi_gsi_domain_id;
19
17static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity) 20static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity)
18{ 21{
19 switch (polarity) { 22 switch (polarity) {
@@ -45,12 +48,10 @@ static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity)
45 */ 48 */
46int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) 49int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
47{ 50{
48 /* 51 struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
49 * Only default domain is supported at present, always find 52 DOMAIN_BUS_ANY);
50 * the mapping corresponding to default domain by passing NULL 53
51 * as irq_domain parameter 54 *irq = irq_find_mapping(d, gsi);
52 */
53 *irq = irq_find_mapping(NULL, gsi);
54 /* 55 /*
55 * *irq == 0 means no mapping, that should 56 * *irq == 0 means no mapping, that should
56 * be reported as a failure 57 * be reported as a failure
@@ -72,23 +73,19 @@ EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
72int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, 73int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
73 int polarity) 74 int polarity)
74{ 75{
75 unsigned int irq; 76 struct irq_fwspec fwspec;
76 unsigned int irq_type = acpi_gsi_get_irq_type(trigger, polarity);
77 77
78 /* 78 if (WARN_ON(!acpi_gsi_domain_id)) {
79 * There is no way at present to look-up the IRQ domain on ACPI, 79 pr_warn("GSI: No registered irqchip, giving up\n");
80 * hence always create mapping referring to the default domain
81 * by passing NULL as irq_domain parameter
82 */
83 irq = irq_create_mapping(NULL, gsi);
84 if (!irq)
85 return -EINVAL; 80 return -EINVAL;
81 }
86 82
87 /* Set irq type if specified and different than the current one */ 83 fwspec.fwnode = acpi_gsi_domain_id;
88 if (irq_type != IRQ_TYPE_NONE && 84 fwspec.param[0] = gsi;
89 irq_type != irq_get_trigger_type(irq)) 85 fwspec.param[1] = acpi_gsi_get_irq_type(trigger, polarity);
90 irq_set_irq_type(irq, irq_type); 86 fwspec.param_count = 2;
91 return irq; 87
88 return irq_create_fwspec_mapping(&fwspec);
92} 89}
93EXPORT_SYMBOL_GPL(acpi_register_gsi); 90EXPORT_SYMBOL_GPL(acpi_register_gsi);
94 91
@@ -98,8 +95,23 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi);
98 */ 95 */
99void acpi_unregister_gsi(u32 gsi) 96void acpi_unregister_gsi(u32 gsi)
100{ 97{
101 int irq = irq_find_mapping(NULL, gsi); 98 struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
99 DOMAIN_BUS_ANY);
100 int irq = irq_find_mapping(d, gsi);
102 101
103 irq_dispose_mapping(irq); 102 irq_dispose_mapping(irq);
104} 103}
105EXPORT_SYMBOL_GPL(acpi_unregister_gsi); 104EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
105
106/**
107 * acpi_set_irq_model - Setup the GSI irqdomain information
108 * @model: the value assigned to acpi_irq_model
109 * @fwnode: the irq_domain identifier for mapping and looking up
110 * GSI interrupts
111 */
112void __init acpi_set_irq_model(enum acpi_irq_model_id model,
113 struct fwnode_handle *fwnode)
114{
115 acpi_irq_model = model;
116 acpi_gsi_domain_id = fwnode;
117}
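
With the gsi.c change above, the GSI layer no longer assumes a single default irqdomain: the irqchip driver registers a fwnode identifier through acpi_set_irq_model(), and acpi_gsi_to_irq()/acpi_register_gsi() first resolve that handle to a domain and only then translate the GSI. A small sketch of that two-step lookup, with plain structs and a toy mapping standing in for the irqdomain infrastructure; set_irq_model() and gsi_to_irq() here are illustrative, not the kernel functions.

#include <stdio.h>
#include <stddef.h>

struct domain {
	const char *name;
	int base;			/* toy "mapping": irq = base + gsi */
};

static const struct domain *gsi_domain;	/* set once by the irqchip driver */

static void set_irq_model(const struct domain *d)
{
	gsi_domain = d;
}

static int gsi_to_irq(unsigned int gsi)
{
	if (!gsi_domain)		/* no registered irqchip: give up */
		return -1;
	return gsi_domain->base + gsi;	/* stand-in for irq_find_mapping() */
}

int main(void)
{
	static const struct domain gic = { "gic", 32 };

	printf("before registration: %d\n", gsi_to_irq(9));
	set_irq_model(&gic);
	printf("after registration:  %d\n", gsi_to_irq(9));
	return 0;
}
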
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index be0eb4639128..a641cf3ccad6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -322,6 +322,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
322 goto probe_failed; 322 goto probe_failed;
323 } 323 }
324 324
325 pinctrl_init_done(dev);
326
325 if (dev->pm_domain && dev->pm_domain->sync) 327 if (dev->pm_domain && dev->pm_domain->sync)
326 dev->pm_domain->sync(dev); 328 dev->pm_domain->sync(dev);
327 329
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 950fff9ce453..a12ff9863d7e 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
187 * global one. Requires architecture specific dev_get_cma_area() helper 187 * global one. Requires architecture specific dev_get_cma_area() helper
188 * function. 188 * function.
189 */ 189 */
190struct page *dma_alloc_from_contiguous(struct device *dev, int count, 190struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
191 unsigned int align) 191 unsigned int align)
192{ 192{
193 if (align > CONFIG_CMA_ALIGNMENT) 193 if (align > CONFIG_CMA_ALIGNMENT)
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
index 5fb74b43848e..076297592754 100644
--- a/drivers/base/pinctrl.c
+++ b/drivers/base/pinctrl.c
@@ -42,9 +42,20 @@ int pinctrl_bind_pins(struct device *dev)
42 goto cleanup_get; 42 goto cleanup_get;
43 } 43 }
44 44
45 ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state); 45 dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
46 PINCTRL_STATE_INIT);
47 if (IS_ERR(dev->pins->init_state)) {
48 /* Not supplying this state is perfectly legal */
49 dev_dbg(dev, "no init pinctrl state\n");
50
51 ret = pinctrl_select_state(dev->pins->p,
52 dev->pins->default_state);
53 } else {
54 ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
55 }
56
46 if (ret) { 57 if (ret) {
47 dev_dbg(dev, "failed to activate default pinctrl state\n"); 58 dev_dbg(dev, "failed to activate initial pinctrl state\n");
48 goto cleanup_get; 59 goto cleanup_get;
49 } 60 }
50 61
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 134483daac25..5df4575b5ba7 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -152,7 +152,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec,
152 152
153/** 153/**
154 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain 154 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
155 * @np: Optional device-tree node of the interrupt controller 155 * @fwnode: Optional fwnode of the interrupt controller
156 * @info: MSI domain info 156 * @info: MSI domain info
157 * @parent: Parent irq domain 157 * @parent: Parent irq domain
158 * 158 *
@@ -162,7 +162,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec,
162 * Returns: 162 * Returns:
163 * A domain pointer or NULL in case of failure. 163 * A domain pointer or NULL in case of failure.
164 */ 164 */
165struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, 165struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
166 struct msi_domain_info *info, 166 struct msi_domain_info *info,
167 struct irq_domain *parent) 167 struct irq_domain *parent)
168{ 168{
@@ -173,7 +173,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
173 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 173 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
174 platform_msi_update_chip_ops(info); 174 platform_msi_update_chip_ops(info);
175 175
176 domain = msi_create_irq_domain(np, info, parent); 176 domain = msi_create_irq_domain(fwnode, info, parent);
177 if (domain) 177 if (domain)
178 domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; 178 domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
179 179
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index cc557886ab23..3250e53473a3 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
59 regmap_lock lock; 59 regmap_lock lock;
60 regmap_unlock unlock; 60 regmap_unlock unlock;
61 void *lock_arg; /* This is passed to lock/unlock functions */ 61 void *lock_arg; /* This is passed to lock/unlock functions */
62 gfp_t alloc_flags;
62 63
63 struct device *dev; /* Device we do I/O on */ 64 struct device *dev; /* Device we do I/O on */
64 void *work_buf; /* Scratch buffer used to format I/O */ 65 void *work_buf; /* Scratch buffer used to format I/O */
@@ -98,6 +99,8 @@ struct regmap {
98 99
99 int (*reg_read)(void *context, unsigned int reg, unsigned int *val); 100 int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
100 int (*reg_write)(void *context, unsigned int reg, unsigned int val); 101 int (*reg_write)(void *context, unsigned int reg, unsigned int val);
102 int (*reg_update_bits)(void *context, unsigned int reg,
103 unsigned int mask, unsigned int val);
101 104
102 bool defer_caching; 105 bool defer_caching;
103 106
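
The new reg_update_bits() member lets a bus do the read-modify-write itself (for example in firmware) instead of regmap emulating it with a read followed by a write. A hedged sketch of what a bus driver might plug in; foo_ names and the firmware call are placeholders, only the callback signature comes from the structure above:

	/* Sketch only: "foo" stands in for a real bus/firmware interface. */
	static int foo_reg_update_bits(void *context, unsigned int reg,
				       unsigned int mask, unsigned int val)
	{
		struct foo_ctrl *foo = context;

		/* One firmware request performs the read-modify-write. */
		return foo_masked_write(foo, reg, mask, val & mask);
	}

	static const struct regmap_bus foo_regmap_bus = {
		.reg_read	 = foo_reg_read,	/* as before */
		.reg_write	 = foo_reg_write,	/* as before */
		.reg_update_bits = foo_reg_update_bits,
	};
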
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 4c55cfbad19e..3f0a7e262d69 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -30,7 +30,7 @@ static LIST_HEAD(regmap_debugfs_early_list);
30static DEFINE_MUTEX(regmap_debugfs_early_lock); 30static DEFINE_MUTEX(regmap_debugfs_early_lock);
31 31
32/* Calculate the length of a fixed format */ 32/* Calculate the length of a fixed format */
33static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) 33static size_t regmap_calc_reg_len(int max_val)
34{ 34{
35 return snprintf(NULL, 0, "%x", max_val); 35 return snprintf(NULL, 0, "%x", max_val);
36} 36}
@@ -173,8 +173,7 @@ static inline void regmap_calc_tot_len(struct regmap *map,
173{ 173{
174 /* Calculate the length of a fixed format */ 174 /* Calculate the length of a fixed format */
175 if (!map->debugfs_tot_len) { 175 if (!map->debugfs_tot_len) {
176 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register, 176 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register),
177 buf, count);
178 map->debugfs_val_len = 2 * map->format.val_bytes; 177 map->debugfs_val_len = 2 * map->format.val_bytes;
179 map->debugfs_tot_len = map->debugfs_reg_len + 178 map->debugfs_tot_len = map->debugfs_reg_len +
180 map->debugfs_val_len + 3; /* : \n */ 179 map->debugfs_val_len + 3; /* : \n */
@@ -338,6 +337,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
338 char *buf; 337 char *buf;
339 char *entry; 338 char *entry;
340 int ret; 339 int ret;
340 unsigned entry_len;
341 341
342 if (*ppos < 0 || !count) 342 if (*ppos < 0 || !count)
343 return -EINVAL; 343 return -EINVAL;
@@ -365,18 +365,15 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
365 p = 0; 365 p = 0;
366 mutex_lock(&map->cache_lock); 366 mutex_lock(&map->cache_lock);
367 list_for_each_entry(c, &map->debugfs_off_cache, list) { 367 list_for_each_entry(c, &map->debugfs_off_cache, list) {
368 snprintf(entry, PAGE_SIZE, "%x-%x", 368 entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
369 c->base_reg, c->max_reg); 369 c->base_reg, c->max_reg);
370 if (p >= *ppos) { 370 if (p >= *ppos) {
371 if (buf_pos + 1 + strlen(entry) > count) 371 if (buf_pos + entry_len > count)
372 break; 372 break;
373 snprintf(buf + buf_pos, count - buf_pos, 373 memcpy(buf + buf_pos, entry, entry_len);
374 "%s", entry); 374 buf_pos += entry_len;
375 buf_pos += strlen(entry);
376 buf[buf_pos] = '\n';
377 buf_pos++;
378 } 375 }
379 p += strlen(entry) + 1; 376 p += entry_len;
380 } 377 }
381 mutex_unlock(&map->cache_lock); 378 mutex_unlock(&map->cache_lock);
382 379
@@ -420,7 +417,7 @@ static ssize_t regmap_access_read_file(struct file *file,
420 return -ENOMEM; 417 return -ENOMEM;
421 418
422 /* Calculate the length of a fixed format */ 419 /* Calculate the length of a fixed format */
423 reg_len = regmap_calc_reg_len(map->max_register, buf, count); 420 reg_len = regmap_calc_reg_len(map->max_register);
424 tot_len = reg_len + 10; /* ': R W V P\n' */ 421 tot_len = reg_len + 10; /* ': R W V P\n' */
425 422
426 for (i = 0; i <= map->max_register; i += map->reg_stride) { 423 for (i = 0; i <= map->max_register; i += map->reg_stride) {
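
The simplified regmap_calc_reg_len() relies on snprintf(NULL, 0, ...) returning the length the output would have had, without writing anything. A small standalone C example of the idiom (values are illustrative):

	#include <stdio.h>

	/* snprintf with a NULL buffer and size 0 only measures the
	 * formatted length, i.e. the hex field width needed for the
	 * largest register number.
	 */
	static size_t calc_reg_len(unsigned int max_val)
	{
		return snprintf(NULL, 0, "%x", max_val);
	}

	int main(void)
	{
		printf("%zu\n", calc_reg_len(0x3fff));	/* prints 4 */
		return 0;
	}
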
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 38d1f72d869c..8d16db533527 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -63,6 +63,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
63 struct regmap *map = d->map; 63 struct regmap *map = d->map;
64 int i, ret; 64 int i, ret;
65 u32 reg; 65 u32 reg;
66 u32 unmask_offset;
66 67
67 if (d->chip->runtime_pm) { 68 if (d->chip->runtime_pm) {
68 ret = pm_runtime_get_sync(map->dev); 69 ret = pm_runtime_get_sync(map->dev);
@@ -79,12 +80,28 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
79 for (i = 0; i < d->chip->num_regs; i++) { 80 for (i = 0; i < d->chip->num_regs; i++) {
80 reg = d->chip->mask_base + 81 reg = d->chip->mask_base +
81 (i * map->reg_stride * d->irq_reg_stride); 82 (i * map->reg_stride * d->irq_reg_stride);
82 if (d->chip->mask_invert) 83 if (d->chip->mask_invert) {
83 ret = regmap_update_bits(d->map, reg, 84 ret = regmap_update_bits(d->map, reg,
84 d->mask_buf_def[i], ~d->mask_buf[i]); 85 d->mask_buf_def[i], ~d->mask_buf[i]);
85 else 86 } else if (d->chip->unmask_base) {
87 /* set mask with mask_base register */
88 ret = regmap_update_bits(d->map, reg,
89 d->mask_buf_def[i], ~d->mask_buf[i]);
90 if (ret < 0)
91 dev_err(d->map->dev,
92 "Failed to sync unmasks in %x\n",
93 reg);
94 unmask_offset = d->chip->unmask_base -
95 d->chip->mask_base;
96 /* clear mask with unmask_base register */
97 ret = regmap_update_bits(d->map,
98 reg + unmask_offset,
99 d->mask_buf_def[i],
100 d->mask_buf[i]);
101 } else {
86 ret = regmap_update_bits(d->map, reg, 102 ret = regmap_update_bits(d->map, reg,
87 d->mask_buf_def[i], d->mask_buf[i]); 103 d->mask_buf_def[i], d->mask_buf[i]);
104 }
88 if (ret != 0) 105 if (ret != 0)
89 dev_err(d->map->dev, "Failed to sync masks in %x\n", 106 dev_err(d->map->dev, "Failed to sync masks in %x\n",
90 reg); 107 reg);
@@ -116,7 +133,11 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
116 if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { 133 if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
117 reg = d->chip->ack_base + 134 reg = d->chip->ack_base +
118 (i * map->reg_stride * d->irq_reg_stride); 135 (i * map->reg_stride * d->irq_reg_stride);
119 ret = regmap_write(map, reg, d->mask_buf[i]); 136 /* some chips ack by write 0 */
137 if (d->chip->ack_invert)
138 ret = regmap_write(map, reg, ~d->mask_buf[i]);
139 else
140 ret = regmap_write(map, reg, d->mask_buf[i]);
120 if (ret != 0) 141 if (ret != 0)
121 dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", 142 dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
122 reg, ret); 143 reg, ret);
@@ -339,6 +360,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
339 int i; 360 int i;
340 int ret = -ENOMEM; 361 int ret = -ENOMEM;
341 u32 reg; 362 u32 reg;
363 u32 unmask_offset;
342 364
343 if (chip->num_regs <= 0) 365 if (chip->num_regs <= 0)
344 return -EINVAL; 366 return -EINVAL;
@@ -420,7 +442,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
420 if (chip->mask_invert) 442 if (chip->mask_invert)
421 ret = regmap_update_bits(map, reg, 443 ret = regmap_update_bits(map, reg,
422 d->mask_buf[i], ~d->mask_buf[i]); 444 d->mask_buf[i], ~d->mask_buf[i]);
423 else 445 else if (d->chip->unmask_base) {
446 unmask_offset = d->chip->unmask_base -
447 d->chip->mask_base;
448 ret = regmap_update_bits(d->map,
449 reg + unmask_offset,
450 d->mask_buf[i],
451 d->mask_buf[i]);
452 } else
424 ret = regmap_update_bits(map, reg, 453 ret = regmap_update_bits(map, reg,
425 d->mask_buf[i], d->mask_buf[i]); 454 d->mask_buf[i], d->mask_buf[i]);
426 if (ret != 0) { 455 if (ret != 0) {
@@ -445,7 +474,11 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
445 if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { 474 if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
446 reg = chip->ack_base + 475 reg = chip->ack_base +
447 (i * map->reg_stride * d->irq_reg_stride); 476 (i * map->reg_stride * d->irq_reg_stride);
448 ret = regmap_write(map, reg, 477 if (chip->ack_invert)
478 ret = regmap_write(map, reg,
479 ~(d->status_buf[i] & d->mask_buf[i]));
480 else
481 ret = regmap_write(map, reg,
449 d->status_buf[i] & d->mask_buf[i]); 482 d->status_buf[i] & d->mask_buf[i]);
450 if (ret != 0) { 483 if (ret != 0) {
451 dev_err(map->dev, "Failed to ack 0x%x: %d\n", 484 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
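
With unmask_base and ack_invert handled above, chips that use separate mask-set/mask-clear registers, or that acknowledge interrupts by writing 0, can be described directly. A hedged sketch of such a chip description; the register offsets and foo_ names are invented, only the field names come from the code above:

	#include <linux/regmap.h>

	/* Hypothetical chip: 0x10 sets mask bits, 0x14 clears them, and
	 * the status at 0x0c is acknowledged by writing 0 to the latched
	 * bits.
	 */
	static const struct regmap_irq foo_irqs[] = {
		{ .reg_offset = 0, .mask = BIT(0) },	/* IRQ 0 */
		{ .reg_offset = 0, .mask = BIT(1) },	/* IRQ 1 */
	};

	static const struct regmap_irq_chip foo_irq_chip = {
		.name		= "foo",
		.status_base	= 0x0c,
		.mask_base	= 0x10,
		.unmask_base	= 0x14,	/* separate set/clear registers */
		.ack_base	= 0x0c,
		.ack_invert	= true,	/* ack by writing 0, not 1 */
		.num_regs	= 1,
		.irqs		= foo_irqs,
		.num_irqs	= ARRAY_SIZE(foo_irqs),
	};
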
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index afaf56200674..4ac63c0e50c7 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -561,6 +561,16 @@ struct regmap *__regmap_init(struct device *dev,
561 } 561 }
562 map->lock_arg = map; 562 map->lock_arg = map;
563 } 563 }
564
565 /*
566 * When we write in fast-paths with regmap_bulk_write() don't allocate
567 * scratch buffers with sleeping allocations.
568 */
569 if ((bus && bus->fast_io) || config->fast_io)
570 map->alloc_flags = GFP_ATOMIC;
571 else
572 map->alloc_flags = GFP_KERNEL;
573
564 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 574 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
565 map->format.pad_bytes = config->pad_bits / 8; 575 map->format.pad_bytes = config->pad_bits / 8;
566 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 576 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
@@ -619,6 +629,7 @@ struct regmap *__regmap_init(struct device *dev,
619 goto skip_format_initialization; 629 goto skip_format_initialization;
620 } else { 630 } else {
621 map->reg_read = _regmap_bus_read; 631 map->reg_read = _regmap_bus_read;
632 map->reg_update_bits = bus->reg_update_bits;
622 } 633 }
623 634
624 reg_endian = regmap_get_reg_endian(bus, config); 635 reg_endian = regmap_get_reg_endian(bus, config);
@@ -1786,7 +1797,7 @@ out:
1786 if (!val_count) 1797 if (!val_count)
1787 return -EINVAL; 1798 return -EINVAL;
1788 1799
1789 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); 1800 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
1790 if (!wval) { 1801 if (!wval) {
1791 dev_err(map->dev, "Error in memory allocation\n"); 1802 dev_err(map->dev, "Error in memory allocation\n");
1792 return -ENOMEM; 1803 return -ENOMEM;
@@ -2509,20 +2520,26 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2509 int ret; 2520 int ret;
2510 unsigned int tmp, orig; 2521 unsigned int tmp, orig;
2511 2522
2512 ret = _regmap_read(map, reg, &orig); 2523 if (change)
2513 if (ret != 0) 2524 *change = false;
2514 return ret;
2515 2525
2516 tmp = orig & ~mask; 2526 if (regmap_volatile(map, reg) && map->reg_update_bits) {
2517 tmp |= val & mask; 2527 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
2518 2528 if (ret == 0 && change)
2519 if (force_write || (tmp != orig)) {
2520 ret = _regmap_write(map, reg, tmp);
2521 if (change)
2522 *change = true; 2529 *change = true;
2523 } else { 2530 } else {
2524 if (change) 2531 ret = _regmap_read(map, reg, &orig);
2525 *change = false; 2532 if (ret != 0)
2533 return ret;
2534
2535 tmp = orig & ~mask;
2536 tmp |= val & mask;
2537
2538 if (force_write || (tmp != orig)) {
2539 ret = _regmap_write(map, reg, tmp);
2540 if (ret == 0 && change)
2541 *change = true;
2542 }
2526 } 2543 }
2527 2544
2528 return ret; 2545 return ret;
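
From a caller's point of view the rework above is invisible: *change is now initialised to false up front and only set once a write (or bus-level update) succeeds, and volatile registers go straight to the bus's reg_update_bits() when one exists. A short usage sketch; map, dev and FOO_CTRL_REG are assumed to exist and are not part of the patch:

	bool changed;
	int ret;

	/* Set bits [3:0] of a control register to 0x5 and learn whether
	 * the stored value actually changed.
	 */
	ret = regmap_update_bits_check(map, FOO_CTRL_REG, 0xf, 0x5, &changed);
	if (!ret && changed)
		dev_dbg(dev, "FOO_CTRL was updated\n");
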
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 293495a75d3d..1b87623381e2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -60,6 +60,7 @@ struct nbd_device {
60 bool disconnect; /* a disconnect has been requested by user */ 60 bool disconnect; /* a disconnect has been requested by user */
61 61
62 struct timer_list timeout_timer; 62 struct timer_list timeout_timer;
63 spinlock_t tasks_lock;
63 struct task_struct *task_recv; 64 struct task_struct *task_recv;
64 struct task_struct *task_send; 65 struct task_struct *task_send;
65 66
@@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd)
140static void nbd_xmit_timeout(unsigned long arg) 141static void nbd_xmit_timeout(unsigned long arg)
141{ 142{
142 struct nbd_device *nbd = (struct nbd_device *)arg; 143 struct nbd_device *nbd = (struct nbd_device *)arg;
143 struct task_struct *task; 144 unsigned long flags;
144 145
145 if (list_empty(&nbd->queue_head)) 146 if (list_empty(&nbd->queue_head))
146 return; 147 return;
147 148
148 nbd->disconnect = true; 149 nbd->disconnect = true;
149 150
150 task = READ_ONCE(nbd->task_recv); 151 spin_lock_irqsave(&nbd->tasks_lock, flags);
151 if (task) 152
152 force_sig(SIGKILL, task); 153 if (nbd->task_recv)
154 force_sig(SIGKILL, nbd->task_recv);
153 155
154 task = READ_ONCE(nbd->task_send); 156 if (nbd->task_send)
155 if (task)
156 force_sig(SIGKILL, nbd->task_send); 157 force_sig(SIGKILL, nbd->task_send);
157 158
159 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
160
158 dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n"); 161 dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
159} 162}
160 163
@@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd)
403{ 406{
404 struct request *req; 407 struct request *req;
405 int ret; 408 int ret;
409 unsigned long flags;
406 410
407 BUG_ON(nbd->magic != NBD_MAGIC); 411 BUG_ON(nbd->magic != NBD_MAGIC);
408 412
409 sk_set_memalloc(nbd->sock->sk); 413 sk_set_memalloc(nbd->sock->sk);
410 414
415 spin_lock_irqsave(&nbd->tasks_lock, flags);
411 nbd->task_recv = current; 416 nbd->task_recv = current;
417 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
412 418
413 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); 419 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
414 if (ret) { 420 if (ret) {
415 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); 421 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
422
423 spin_lock_irqsave(&nbd->tasks_lock, flags);
416 nbd->task_recv = NULL; 424 nbd->task_recv = NULL;
425 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
426
417 return ret; 427 return ret;
418 } 428 }
419 429
@@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd)
429 439
430 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); 440 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
431 441
442 spin_lock_irqsave(&nbd->tasks_lock, flags);
432 nbd->task_recv = NULL; 443 nbd->task_recv = NULL;
444 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
433 445
434 if (signal_pending(current)) { 446 if (signal_pending(current)) {
435 siginfo_t info; 447 siginfo_t info;
@@ -534,8 +546,11 @@ static int nbd_thread_send(void *data)
534{ 546{
535 struct nbd_device *nbd = data; 547 struct nbd_device *nbd = data;
536 struct request *req; 548 struct request *req;
549 unsigned long flags;
537 550
551 spin_lock_irqsave(&nbd->tasks_lock, flags);
538 nbd->task_send = current; 552 nbd->task_send = current;
553 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
539 554
540 set_user_nice(current, MIN_NICE); 555 set_user_nice(current, MIN_NICE);
541 while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { 556 while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -572,7 +587,15 @@ static int nbd_thread_send(void *data)
572 nbd_handle_req(nbd, req); 587 nbd_handle_req(nbd, req);
573 } 588 }
574 589
590 spin_lock_irqsave(&nbd->tasks_lock, flags);
575 nbd->task_send = NULL; 591 nbd->task_send = NULL;
592 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
593
594 /* Clear maybe pending signals */
595 if (signal_pending(current)) {
596 siginfo_t info;
597 dequeue_signal_lock(current, &current->blocked, &info);
598 }
576 599
577 return 0; 600 return 0;
578} 601}
@@ -1052,6 +1075,7 @@ static int __init nbd_init(void)
1052 nbd_dev[i].magic = NBD_MAGIC; 1075 nbd_dev[i].magic = NBD_MAGIC;
1053 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); 1076 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
1054 spin_lock_init(&nbd_dev[i].queue_lock); 1077 spin_lock_init(&nbd_dev[i].queue_lock);
1078 spin_lock_init(&nbd_dev[i].tasks_lock);
1055 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 1079 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
1056 mutex_init(&nbd_dev[i].tx_lock); 1080 mutex_init(&nbd_dev[i].tx_lock);
1057 init_timer(&nbd_dev[i].timeout_timer); 1081 init_timer(&nbd_dev[i].timeout_timer);
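
The tasks_lock added above makes the timeout handler and the receive/send threads agree before touching task_recv/task_send, closing the window in which the timer could signal a task that is already clearing its pointer. The pattern, condensed and with nbd specifics stripped (dev is a placeholder structure):

	unsigned long flags;

	/* Thread side: publish or clear the task pointer under the lock. */
	spin_lock_irqsave(&dev->tasks_lock, flags);
	dev->task_recv = current;
	spin_unlock_irqrestore(&dev->tasks_lock, flags);

	/* Timer side: only signal while holding the same lock. */
	spin_lock_irqsave(&dev->tasks_lock, flags);
	if (dev->task_recv)
		force_sig(SIGKILL, dev->task_recv);
	spin_unlock_irqrestore(&dev->tasks_lock, flags);
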
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 6f04771f1019..ccc0c1f93daa 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -603,27 +603,31 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
603 struct nvme_iod *iod = ctx; 603 struct nvme_iod *iod = ctx;
604 struct request *req = iod_get_private(iod); 604 struct request *req = iod_get_private(iod);
605 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req); 605 struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
606
607 u16 status = le16_to_cpup(&cqe->status) >> 1; 606 u16 status = le16_to_cpup(&cqe->status) >> 1;
607 bool requeue = false;
608 int error = 0;
608 609
609 if (unlikely(status)) { 610 if (unlikely(status)) {
610 if (!(status & NVME_SC_DNR || blk_noretry_request(req)) 611 if (!(status & NVME_SC_DNR || blk_noretry_request(req))
611 && (jiffies - req->start_time) < req->timeout) { 612 && (jiffies - req->start_time) < req->timeout) {
612 unsigned long flags; 613 unsigned long flags;
613 614
615 requeue = true;
614 blk_mq_requeue_request(req); 616 blk_mq_requeue_request(req);
615 spin_lock_irqsave(req->q->queue_lock, flags); 617 spin_lock_irqsave(req->q->queue_lock, flags);
616 if (!blk_queue_stopped(req->q)) 618 if (!blk_queue_stopped(req->q))
617 blk_mq_kick_requeue_list(req->q); 619 blk_mq_kick_requeue_list(req->q);
618 spin_unlock_irqrestore(req->q->queue_lock, flags); 620 spin_unlock_irqrestore(req->q->queue_lock, flags);
619 return; 621 goto release_iod;
620 } 622 }
621 623
622 if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 624 if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
623 if (cmd_rq->ctx == CMD_CTX_CANCELLED) 625 if (cmd_rq->ctx == CMD_CTX_CANCELLED)
624 status = -EINTR; 626 error = -EINTR;
627 else
628 error = status;
625 } else { 629 } else {
626 status = nvme_error_status(status); 630 error = nvme_error_status(status);
627 } 631 }
628 } 632 }
629 633
@@ -635,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
635 if (cmd_rq->aborted) 639 if (cmd_rq->aborted)
636 dev_warn(nvmeq->dev->dev, 640 dev_warn(nvmeq->dev->dev,
637 "completing aborted command with status:%04x\n", 641 "completing aborted command with status:%04x\n",
638 status); 642 error);
639 643
644release_iod:
640 if (iod->nents) { 645 if (iod->nents) {
641 dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, 646 dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
642 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 647 rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -649,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
649 } 654 }
650 nvme_free_iod(nvmeq->dev, iod); 655 nvme_free_iod(nvmeq->dev, iod);
651 656
652 blk_mq_complete_request(req, status); 657 if (likely(!requeue))
658 blk_mq_complete_request(req, error);
653} 659}
654 660
655/* length is in bytes. gfp flags indicates whether we may sleep. */ 661/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -1804,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1804 1810
1805 length = (io.nblocks + 1) << ns->lba_shift; 1811 length = (io.nblocks + 1) << ns->lba_shift;
1806 meta_len = (io.nblocks + 1) * ns->ms; 1812 meta_len = (io.nblocks + 1) * ns->ms;
1807 metadata = (void __user *)(unsigned long)io.metadata; 1813 metadata = (void __user *)(uintptr_t)io.metadata;
1808 write = io.opcode & 1; 1814 write = io.opcode & 1;
1809 1815
1810 if (ns->ext) { 1816 if (ns->ext) {
@@ -1844,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1844 c.rw.metadata = cpu_to_le64(meta_dma); 1850 c.rw.metadata = cpu_to_le64(meta_dma);
1845 1851
1846 status = __nvme_submit_sync_cmd(ns->queue, &c, NULL, 1852 status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
1847 (void __user *)io.addr, length, NULL, 0); 1853 (void __user *)(uintptr_t)io.addr, length, NULL, 0);
1848 unmap: 1854 unmap:
1849 if (meta) { 1855 if (meta) {
1850 if (status == NVME_SC_SUCCESS && !write) { 1856 if (status == NVME_SC_SUCCESS && !write) {
@@ -1886,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
1886 timeout = msecs_to_jiffies(cmd.timeout_ms); 1892 timeout = msecs_to_jiffies(cmd.timeout_ms);
1887 1893
1888 status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c, 1894 status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
1889 NULL, (void __user *)cmd.addr, cmd.data_len, 1895 NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
1890 &cmd.result, timeout); 1896 &cmd.result, timeout);
1891 if (status >= 0) { 1897 if (status >= 0) {
1892 if (put_user(cmd.result, &ucmd->result)) 1898 if (put_user(cmd.result, &ucmd->result))
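
The (uintptr_t) casts above address a portability detail: io.addr, io.metadata and cmd.addr are u64 fields carrying user-space addresses, and casting a u64 directly to a pointer triggers "cast to pointer from integer of different size" warnings on 32-bit builds. The idiom in plain C:

	#include <stdint.h>

	/* Go through uintptr_t so the narrowing on 32-bit targets is
	 * explicit and well-defined before the value becomes a pointer.
	 */
	static inline void *u64_to_uptr(uint64_t addr)
	{
		return (void *)(uintptr_t)addr;
	}
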
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index f5e49b639818..128e7df5b807 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v)
96#define RBD_MINORS_PER_MAJOR 256 96#define RBD_MINORS_PER_MAJOR 256
97#define RBD_SINGLE_MAJOR_PART_SHIFT 4 97#define RBD_SINGLE_MAJOR_PART_SHIFT 4
98 98
99#define RBD_MAX_PARENT_CHAIN_LEN 16
100
99#define RBD_SNAP_DEV_NAME_PREFIX "snap_" 101#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
100#define RBD_MAX_SNAP_NAME_LEN \ 102#define RBD_MAX_SNAP_NAME_LEN \
101 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1)) 103 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
@@ -426,7 +428,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
426 size_t count); 428 size_t count);
427static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, 429static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
428 size_t count); 430 size_t count);
429static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); 431static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
430static void rbd_spec_put(struct rbd_spec *spec); 432static void rbd_spec_put(struct rbd_spec *spec);
431 433
432static int rbd_dev_id_to_minor(int dev_id) 434static int rbd_dev_id_to_minor(int dev_id)
@@ -3778,6 +3780,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3778 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 3780 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
3779 q->limits.discard_zeroes_data = 1; 3781 q->limits.discard_zeroes_data = 1;
3780 3782
3783 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3784 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3785
3781 disk->queue = q; 3786 disk->queue = q;
3782 3787
3783 q->queuedata = rbd_dev; 3788 q->queuedata = rbd_dev;
@@ -5131,44 +5136,51 @@ out_err:
5131 return ret; 5136 return ret;
5132} 5137}
5133 5138
5134static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) 5139/*
5140 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5141 * rbd_dev_image_probe() recursion depth, which means it's also the
5142 * length of the already discovered part of the parent chain.
5143 */
5144static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5135{ 5145{
5136 struct rbd_device *parent = NULL; 5146 struct rbd_device *parent = NULL;
5137 struct rbd_spec *parent_spec;
5138 struct rbd_client *rbdc;
5139 int ret; 5147 int ret;
5140 5148
5141 if (!rbd_dev->parent_spec) 5149 if (!rbd_dev->parent_spec)
5142 return 0; 5150 return 0;
5143 /*
5144 * We need to pass a reference to the client and the parent
5145 * spec when creating the parent rbd_dev. Images related by
5146 * parent/child relationships always share both.
5147 */
5148 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
5149 rbdc = __rbd_get_client(rbd_dev->rbd_client);
5150 5151
5151 ret = -ENOMEM; 5152 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5152 parent = rbd_dev_create(rbdc, parent_spec, NULL); 5153 pr_info("parent chain is too long (%d)\n", depth);
5153 if (!parent) 5154 ret = -EINVAL;
5154 goto out_err; 5155 goto out_err;
5156 }
5155 5157
5156 ret = rbd_dev_image_probe(parent, false); 5158 parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
5159 NULL);
5160 if (!parent) {
5161 ret = -ENOMEM;
5162 goto out_err;
5163 }
5164
5165 /*
5166 * Images related by parent/child relationships always share
5167 * rbd_client and spec/parent_spec, so bump their refcounts.
5168 */
5169 __rbd_get_client(rbd_dev->rbd_client);
5170 rbd_spec_get(rbd_dev->parent_spec);
5171
5172 ret = rbd_dev_image_probe(parent, depth);
5157 if (ret < 0) 5173 if (ret < 0)
5158 goto out_err; 5174 goto out_err;
5175
5159 rbd_dev->parent = parent; 5176 rbd_dev->parent = parent;
5160 atomic_set(&rbd_dev->parent_ref, 1); 5177 atomic_set(&rbd_dev->parent_ref, 1);
5161
5162 return 0; 5178 return 0;
5179
5163out_err: 5180out_err:
5164 if (parent) { 5181 rbd_dev_unparent(rbd_dev);
5165 rbd_dev_unparent(rbd_dev); 5182 if (parent)
5166 rbd_dev_destroy(parent); 5183 rbd_dev_destroy(parent);
5167 } else {
5168 rbd_put_client(rbdc);
5169 rbd_spec_put(parent_spec);
5170 }
5171
5172 return ret; 5184 return ret;
5173} 5185}
5174 5186
@@ -5286,7 +5298,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5286 * parent), initiate a watch on its header object before using that 5298 * parent), initiate a watch on its header object before using that
5287 * object to get detailed information about the rbd image. 5299 * object to get detailed information about the rbd image.
5288 */ 5300 */
5289static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) 5301static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5290{ 5302{
5291 int ret; 5303 int ret;
5292 5304
@@ -5304,7 +5316,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5304 if (ret) 5316 if (ret)
5305 goto err_out_format; 5317 goto err_out_format;
5306 5318
5307 if (mapping) { 5319 if (!depth) {
5308 ret = rbd_dev_header_watch_sync(rbd_dev); 5320 ret = rbd_dev_header_watch_sync(rbd_dev);
5309 if (ret) { 5321 if (ret) {
5310 if (ret == -ENOENT) 5322 if (ret == -ENOENT)
@@ -5325,7 +5337,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5325 * Otherwise this is a parent image, identified by pool, image 5337 * Otherwise this is a parent image, identified by pool, image
5326 * and snap ids - need to fill in names for those ids. 5338 * and snap ids - need to fill in names for those ids.
5327 */ 5339 */
5328 if (mapping) 5340 if (!depth)
5329 ret = rbd_spec_fill_snap_id(rbd_dev); 5341 ret = rbd_spec_fill_snap_id(rbd_dev);
5330 else 5342 else
5331 ret = rbd_spec_fill_names(rbd_dev); 5343 ret = rbd_spec_fill_names(rbd_dev);
@@ -5347,12 +5359,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5347 * Need to warn users if this image is the one being 5359 * Need to warn users if this image is the one being
5348 * mapped and has a parent. 5360 * mapped and has a parent.
5349 */ 5361 */
5350 if (mapping && rbd_dev->parent_spec) 5362 if (!depth && rbd_dev->parent_spec)
5351 rbd_warn(rbd_dev, 5363 rbd_warn(rbd_dev,
5352 "WARNING: kernel layering is EXPERIMENTAL!"); 5364 "WARNING: kernel layering is EXPERIMENTAL!");
5353 } 5365 }
5354 5366
5355 ret = rbd_dev_probe_parent(rbd_dev); 5367 ret = rbd_dev_probe_parent(rbd_dev, depth);
5356 if (ret) 5368 if (ret)
5357 goto err_out_probe; 5369 goto err_out_probe;
5358 5370
@@ -5363,7 +5375,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5363err_out_probe: 5375err_out_probe:
5364 rbd_dev_unprobe(rbd_dev); 5376 rbd_dev_unprobe(rbd_dev);
5365err_out_watch: 5377err_out_watch:
5366 if (mapping) 5378 if (!depth)
5367 rbd_dev_header_unwatch_sync(rbd_dev); 5379 rbd_dev_header_unwatch_sync(rbd_dev);
5368out_header_name: 5380out_header_name:
5369 kfree(rbd_dev->header_name); 5381 kfree(rbd_dev->header_name);
@@ -5426,7 +5438,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
5426 spec = NULL; /* rbd_dev now owns this */ 5438 spec = NULL; /* rbd_dev now owns this */
5427 rbd_opts = NULL; /* rbd_dev now owns this */ 5439 rbd_opts = NULL; /* rbd_dev now owns this */
5428 5440
5429 rc = rbd_dev_image_probe(rbd_dev, true); 5441 rc = rbd_dev_image_probe(rbd_dev, 0);
5430 if (rc < 0) 5442 if (rc < 0)
5431 goto err_out_rbd_dev; 5443 goto err_out_rbd_dev;
5432 5444
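
Replacing the bool "mapping" flag with an integer depth does two jobs at once: depth 0 still identifies the image the user mapped, and the counter caps how far the parent chain may be followed. The shape of the pattern, detached from rbd's data structures (struct node and MAX_CHAIN_LEN are illustrative stand-ins):

	#include <errno.h>
	#include <stddef.h>

	#define MAX_CHAIN_LEN 16	/* mirrors RBD_MAX_PARENT_CHAIN_LEN */

	struct node {
		struct node *parent;
	};

	/* depth 0 is the object the user asked for; each parent level
	 * adds 1.  Refuse over-long chains instead of recursing without
	 * bound on corrupted or malicious metadata.
	 */
	static int probe(const struct node *n, int depth)
	{
		if (!n->parent)
			return 0;
		if (++depth > MAX_CHAIN_LEN)
			return -EINVAL;
		return probe(n->parent, depth);
	}
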
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 611170896b8c..a69c02dadec0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1956,7 +1956,8 @@ static void blkback_changed(struct xenbus_device *dev,
1956 break; 1956 break;
1957 /* Missed the backend's Closing state -- fallthrough */ 1957 /* Missed the backend's Closing state -- fallthrough */
1958 case XenbusStateClosing: 1958 case XenbusStateClosing:
1959 blkfront_closing(info); 1959 if (info)
1960 blkfront_closing(info);
1960 break; 1961 break;
1961 } 1962 }
1962} 1963}
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 7d9879e166cf..7082c7268845 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1184,11 +1184,12 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
1184 if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) 1184 if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
1185 break; 1185 break;
1186 target = cpumask_any_but(cpu_online_mask, cpu); 1186 target = cpumask_any_but(cpu_online_mask, cpu);
1187 if (target < 0) 1187 if (target >= nr_cpu_ids)
1188 break; 1188 break;
1189 perf_pmu_migrate_context(&dt->pmu, cpu, target); 1189 perf_pmu_migrate_context(&dt->pmu, cpu, target);
1190 cpumask_set_cpu(target, &dt->cpu); 1190 cpumask_set_cpu(target, &dt->cpu);
1191 WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); 1191 if (ccn->irq)
1192 WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
1192 default: 1193 default:
1193 break; 1194 break;
1194 } 1195 }
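
The hotplug fix above works because cpumask_any_but() returns an unsigned value that is >= nr_cpu_ids when no other CPU qualifies, so the old "< 0" test could never fire. The corrected idiom, with pmu and the tracked mask assumed from context:

	unsigned int target = cpumask_any_but(cpu_online_mask, cpu);

	if (target >= nr_cpu_ids)
		return;		/* no other online CPU to migrate to */

	perf_pmu_migrate_context(pmu, cpu, target);
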
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index c0eaf0973bd2..779b6ff0c7ad 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -333,7 +333,8 @@ int clk_add_alias(const char *alias, const char *alias_dev_name,
333 if (IS_ERR(r)) 333 if (IS_ERR(r))
334 return PTR_ERR(r); 334 return PTR_ERR(r);
335 335
336 l = clkdev_create(r, alias, "%s", alias_dev_name); 336 l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
337 alias_dev_name);
337 clk_put(r); 338 clk_put(r);
338 339
339 return l ? 0 : -ENODEV; 340 return l ? 0 : -ENODEV;
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 9b613426e968..bc24e5a002e7 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -45,8 +45,8 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
45#define PSECS_PER_SEC 1000000000000LL 45#define PSECS_PER_SEC 1000000000000LL
46 46
47/* 47/*
48 * Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to 48 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
49 * simplify calculations. So 45degs could be anywhere between 33deg and 66deg. 49 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
50 */ 50 */
51#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60 51#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
52 52
@@ -69,7 +69,7 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
69 69
70 delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK); 70 delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
71 delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET; 71 delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
72 degrees += delay_num * factor / 10000; 72 degrees += DIV_ROUND_CLOSEST(delay_num * factor, 10000);
73 } 73 }
74 74
75 return degrees % 360; 75 return degrees % 360;
@@ -82,25 +82,41 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
82 u8 nineties, remainder; 82 u8 nineties, remainder;
83 u8 delay_num; 83 u8 delay_num;
84 u32 raw_value; 84 u32 raw_value;
85 u64 delay; 85 u32 delay;
86
87 /* allow 22 to be 22.5 */
88 degrees++;
89 /* floor to 22.5 increment */
90 degrees -= ((degrees) * 10 % 225) / 10;
91 86
92 nineties = degrees / 90; 87 nineties = degrees / 90;
93 /* 22.5 multiples */ 88 remainder = (degrees % 90);
94 remainder = (degrees % 90) / 22; 89
95 90 /*
96 delay = PSECS_PER_SEC; 91 * Due to the inexact nature of the "fine" delay, we might
 97 do_div(delay, rate); 92 * actually go non-monotonic. We don't go _too_ non-monotonic
98 /* / 360 / 22.5 */ 93 * though, so we should be OK. Here are options of how we may
99 do_div(delay, 16); 94 * work:
100 do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC); 95 *
101 96 * Ideally we end up with:
97 * 1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
98 *
99 * On one extreme (if delay is actually 44ps):
100 * .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
101 * The other (if delay is actually 77ps):
 102 * 1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
103 *
104 * It's possible we might make a delay that is up to 25
105 * degrees off from what we think we're making. That's OK
106 * though because we should be REALLY far from any bad range.
107 */
108
109 /*
110 * Convert to delay; do a little extra work to make sure we
111 * don't overflow 32-bit / 64-bit numbers.
112 */
113 delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
102 delay *= remainder; 114 delay *= remainder;
103 delay_num = (u8) min(delay, 255ULL); 115 delay = DIV_ROUND_CLOSEST(delay,
116 (rate / 1000) * 36 *
117 (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));
118
119 delay_num = (u8) min_t(u32, delay, 255);
104 120
105 raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0; 121 raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
106 raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET; 122 raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
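
As a worked example of the new arithmetic: the leftover degrees (after the 90-degree steps are removed) are converted to 60 ps delay elements as remainder * 10^12 / (360 * rate * 60), with numerator and divisor pre-scaled so the intermediate stays inside 32 bits. A small standalone program reproducing the computation (rate and phase values are just examples):

	#include <stdio.h>
	#include <stdint.h>

	#define DELAY_ELEMENT_PSEC 60	/* ROCKCHIP_MMC_DELAY_ELEMENT_PSEC */

	/* Same math as the new rockchip_mmc_set_phase(): delay elements
	 * needed for "remainder" degrees at the given card clock rate.
	 */
	static uint32_t phase_to_delay_num(uint32_t remainder, uint32_t rate)
	{
		uint32_t delay = 10000000;	/* PSECS_PER_SEC / 10000 / 10 */
		uint32_t div;

		delay *= remainder;		/* remainder < 90, still fits u32 */
		div = (rate / 1000) * 36 * (DELAY_ELEMENT_PSEC / 10);
		delay = (delay + div / 2) / div;	/* DIV_ROUND_CLOSEST */

		return delay > 255 ? 255 : delay;
	}

	int main(void)
	{
		/* 45 degrees at 150 MHz: 45/360 of a 6667 ps period is
		 * about 833 ps, i.e. roughly 14 elements of 60 ps.
		 */
		printf("%u\n", phase_to_delay_num(45, 150000000));	/* 14 */
		return 0;
	}
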
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a7726db13abb..50b68bc20720 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -279,6 +279,10 @@ config CLKSRC_MIPS_GIC
279 depends on MIPS_GIC 279 depends on MIPS_GIC
280 select CLKSRC_OF 280 select CLKSRC_OF
281 281
282config CLKSRC_TANGO_XTAL
283 bool
284 select CLKSRC_OF
285
282config CLKSRC_PXA 286config CLKSRC_PXA
283 def_bool y if ARCH_PXA || ARCH_SA1100 287 def_bool y if ARCH_PXA || ARCH_SA1100
284 select CLKSRC_OF if OF 288 select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 5c00863c3e33..67bc996ca909 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -56,9 +56,11 @@ obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
56obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o 56obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
57obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o 57obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
58obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o 58obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
59obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
59obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o 60obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
60obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o 61obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
61obj-$(CONFIG_H8300) += h8300_timer8.o 62obj-$(CONFIG_H8300) += h8300_timer8.o
62obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o 63obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
63obj-$(CONFIG_H8300_TPU) += h8300_tpu.o 64obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
64obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o 65obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
66obj-$(CONFIG_X86_NUMACHIP) += numachip.o
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 29ea50ac366a..a2cb6fae9295 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -60,7 +60,7 @@ static struct clock_event_device __percpu *gt_evt;
60 * different to the 32-bit upper value read previously, go back to step 2. 60 * different to the 32-bit upper value read previously, go back to step 2.
61 * Otherwise the 64-bit timer counter value is correct. 61 * Otherwise the 64-bit timer counter value is correct.
62 */ 62 */
63static u64 gt_counter_read(void) 63static u64 notrace _gt_counter_read(void)
64{ 64{
65 u64 counter; 65 u64 counter;
66 u32 lower; 66 u32 lower;
@@ -79,6 +79,11 @@ static u64 gt_counter_read(void)
79 return counter; 79 return counter;
80} 80}
81 81
82static u64 gt_counter_read(void)
83{
84 return _gt_counter_read();
85}
86
82/** 87/**
83 * To ensure that updates to comparator value register do not set the 88 * To ensure that updates to comparator value register do not set the
84 * Interrupt Status Register proceed as follows: 89 * Interrupt Status Register proceed as follows:
@@ -201,7 +206,7 @@ static struct clocksource gt_clocksource = {
201#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK 206#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
202static u64 notrace gt_sched_clock_read(void) 207static u64 notrace gt_sched_clock_read(void)
203{ 208{
204 return gt_counter_read(); 209 return _gt_counter_read();
205} 210}
206#endif 211#endif
207 212
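
Splitting out a notrace _gt_counter_read() matters because sched_clock() feeds ftrace's timestamps: if the read path itself were traceable, the tracer would recurse into the clock it is using. The same reasoning applies to the other notrace annotations in this series. A minimal sketch of the pattern, with invented names for the counter register:

	#include <linux/sched_clock.h>
	#include <linux/io.h>
	#include <linux/init.h>

	static void __iomem *counter_base;	/* hypothetical free-running counter */

	/* notrace keeps ftrace out of the function it calls to timestamp
	 * its own events.
	 */
	static u64 notrace example_sched_clock_read(void)
	{
		return readl_relaxed(counter_base);
	}

	static void __init example_register_sched_clock(unsigned long rate)
	{
		/* 32-bit counter running at "rate" Hz */
		sched_clock_register(example_sched_clock_read, 32, rate);
	}
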
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 7a97a34dba70..19bb1792d647 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -228,7 +228,6 @@ static int em_sti_register_clocksource(struct em_sti_priv *p)
228{ 228{
229 struct clocksource *cs = &p->cs; 229 struct clocksource *cs = &p->cs;
230 230
231 memset(cs, 0, sizeof(*cs));
232 cs->name = dev_name(&p->pdev->dev); 231 cs->name = dev_name(&p->pdev->dev);
233 cs->rating = 200; 232 cs->rating = 200;
234 cs->read = em_sti_clocksource_read; 233 cs->read = em_sti_clocksource_read;
@@ -285,7 +284,6 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
285{ 284{
286 struct clock_event_device *ced = &p->ced; 285 struct clock_event_device *ced = &p->ced;
287 286
288 memset(ced, 0, sizeof(*ced));
289 ced->name = dev_name(&p->pdev->dev); 287 ced->name = dev_name(&p->pdev->dev);
290 ced->features = CLOCK_EVT_FEAT_ONESHOT; 288 ced->features = CLOCK_EVT_FEAT_ONESHOT;
291 ced->rating = 200; 289 ced->rating = 200;
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 029f96ab131a..ff44082a0827 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -382,24 +382,28 @@ static void exynos4_mct_tick_start(unsigned long cycles,
382static int exynos4_tick_set_next_event(unsigned long cycles, 382static int exynos4_tick_set_next_event(unsigned long cycles,
383 struct clock_event_device *evt) 383 struct clock_event_device *evt)
384{ 384{
385 struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); 385 struct mct_clock_event_device *mevt;
386 386
387 mevt = container_of(evt, struct mct_clock_event_device, evt);
387 exynos4_mct_tick_start(cycles, mevt); 388 exynos4_mct_tick_start(cycles, mevt);
388
389 return 0; 389 return 0;
390} 390}
391 391
392static int set_state_shutdown(struct clock_event_device *evt) 392static int set_state_shutdown(struct clock_event_device *evt)
393{ 393{
394 exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick)); 394 struct mct_clock_event_device *mevt;
395
396 mevt = container_of(evt, struct mct_clock_event_device, evt);
397 exynos4_mct_tick_stop(mevt);
395 return 0; 398 return 0;
396} 399}
397 400
398static int set_state_periodic(struct clock_event_device *evt) 401static int set_state_periodic(struct clock_event_device *evt)
399{ 402{
400 struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); 403 struct mct_clock_event_device *mevt;
401 unsigned long cycles_per_jiffy; 404 unsigned long cycles_per_jiffy;
402 405
406 mevt = container_of(evt, struct mct_clock_event_device, evt);
403 cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) 407 cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
404 >> evt->shift); 408 >> evt->shift);
405 exynos4_mct_tick_stop(mevt); 409 exynos4_mct_tick_stop(mevt);
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index ef434699c80a..10202f1fdfd7 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -118,7 +118,7 @@ static inline void ftm_reset_counter(void __iomem *base)
118 ftm_writel(0x00, base + FTM_CNT); 118 ftm_writel(0x00, base + FTM_CNT);
119} 119}
120 120
121static u64 ftm_read_sched_clock(void) 121static u64 notrace ftm_read_sched_clock(void)
122{ 122{
123 return ftm_readl(priv->clksrc_base + FTM_CNT); 123 return ftm_readl(priv->clksrc_base + FTM_CNT);
124} 124}
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 82941c1e9e33..0e076c6fc006 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -153,7 +153,6 @@ static int timer16_setup(struct timer16_priv *p, struct platform_device *pdev)
153 int ret, irq; 153 int ret, irq;
154 unsigned int ch; 154 unsigned int ch;
155 155
156 memset(p, 0, sizeof(*p));
157 p->pdev = pdev; 156 p->pdev = pdev;
158 157
159 res[REG_CH] = platform_get_resource(p->pdev, 158 res[REG_CH] = platform_get_resource(p->pdev,
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index f9b3b7033a97..44375d8b9bc4 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -215,7 +215,6 @@ static int timer8_setup(struct timer8_priv *p,
215 int irq; 215 int irq;
216 int ret; 216 int ret;
217 217
218 memset(p, 0, sizeof(*p));
219 p->pdev = pdev; 218 p->pdev = pdev;
220 219
221 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); 220 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index 64195fdd78bf..5487410bfabb 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -123,7 +123,6 @@ static int __init tpu_setup(struct tpu_priv *p, struct platform_device *pdev)
123{ 123{
124 struct resource *res[2]; 124 struct resource *res[2];
125 125
126 memset(p, 0, sizeof(*p));
127 p->pdev = pdev; 126 p->pdev = pdev;
128 127
129 res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L); 128 res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L);
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 50f0641c65b6..fbfc74685e6a 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -24,6 +24,7 @@
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/of_address.h> 25#include <linux/of_address.h>
26#include <linux/of_irq.h> 26#include <linux/of_irq.h>
27#include <linux/sched_clock.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28 29
29#define GPT_IRQ_EN_REG 0x00 30#define GPT_IRQ_EN_REG 0x00
@@ -59,6 +60,13 @@ struct mtk_clock_event_device {
59 struct clock_event_device dev; 60 struct clock_event_device dev;
60}; 61};
61 62
63static void __iomem *gpt_sched_reg __read_mostly;
64
65static u64 notrace mtk_read_sched_clock(void)
66{
67 return readl_relaxed(gpt_sched_reg);
68}
69
62static inline struct mtk_clock_event_device *to_mtk_clk( 70static inline struct mtk_clock_event_device *to_mtk_clk(
63 struct clock_event_device *c) 71 struct clock_event_device *c)
64{ 72{
@@ -141,14 +149,6 @@ static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
141 return IRQ_HANDLED; 149 return IRQ_HANDLED;
142} 150}
143 151
144static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
145{
146 /* Disable all interrupts */
147 writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
148 /* Acknowledge all interrupts */
149 writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
150}
151
152static void 152static void
153mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option) 153mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
154{ 154{
@@ -168,6 +168,12 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
168{ 168{
169 u32 val; 169 u32 val;
170 170
171 /* Disable all interrupts */
172 writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
173
174 /* Acknowledge all spurious pending interrupts */
175 writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
176
171 val = readl(evt->gpt_base + GPT_IRQ_EN_REG); 177 val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
172 writel(val | GPT_IRQ_ENABLE(timer), 178 writel(val | GPT_IRQ_ENABLE(timer),
173 evt->gpt_base + GPT_IRQ_EN_REG); 179 evt->gpt_base + GPT_IRQ_EN_REG);
@@ -220,8 +226,6 @@ static void __init mtk_timer_init(struct device_node *node)
220 } 226 }
221 rate = clk_get_rate(clk); 227 rate = clk_get_rate(clk);
222 228
223 mtk_timer_global_reset(evt);
224
225 if (request_irq(evt->dev.irq, mtk_timer_interrupt, 229 if (request_irq(evt->dev.irq, mtk_timer_interrupt,
226 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { 230 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
227 pr_warn("failed to setup irq %d\n", evt->dev.irq); 231 pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -234,6 +238,8 @@ static void __init mtk_timer_init(struct device_node *node)
234 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); 238 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
235 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), 239 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
236 node->name, rate, 300, 32, clocksource_mmio_readl_up); 240 node->name, rate, 300, 32, clocksource_mmio_readl_up);
241 gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
242 sched_clock_register(mtk_read_sched_clock, 32, rate);
237 243
238 /* Configure clock event */ 244 /* Configure clock event */
239 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); 245 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c
new file mode 100644
index 000000000000..4e0f11fd2617
--- /dev/null
+++ b/drivers/clocksource/numachip.c
@@ -0,0 +1,95 @@
1/*
2 *
3 * Copyright (C) 2015 Numascale AS. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/clockchips.h>
17
18#include <asm/irq.h>
19#include <asm/numachip/numachip.h>
20#include <asm/numachip/numachip_csr.h>
21
22static DEFINE_PER_CPU(struct clock_event_device, numachip2_ced);
23
24static cycles_t numachip2_timer_read(struct clocksource *cs)
25{
26 return numachip2_read64_lcsr(NUMACHIP2_TIMER_NOW);
27}
28
29static struct clocksource numachip2_clocksource = {
30 .name = "numachip2",
31 .rating = 295,
32 .read = numachip2_timer_read,
33 .mask = CLOCKSOURCE_MASK(64),
34 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
35 .mult = 1,
36 .shift = 0,
37};
38
39static int numachip2_set_next_event(unsigned long delta, struct clock_event_device *ced)
40{
41 numachip2_write64_lcsr(NUMACHIP2_TIMER_DEADLINE + numachip2_timer(),
42 delta);
43 return 0;
44}
45
46static struct clock_event_device numachip2_clockevent = {
47 .name = "numachip2",
48 .rating = 400,
49 .set_next_event = numachip2_set_next_event,
50 .features = CLOCK_EVT_FEAT_ONESHOT,
51 .mult = 1,
52 .shift = 0,
53 .min_delta_ns = 1250,
54 .max_delta_ns = LONG_MAX,
55};
56
57static void numachip_timer_interrupt(void)
58{
59 struct clock_event_device *ced = this_cpu_ptr(&numachip2_ced);
60
61 ced->event_handler(ced);
62}
63
64static __init void numachip_timer_each(struct work_struct *work)
65{
66 unsigned local_apicid = __this_cpu_read(x86_cpu_to_apicid) & 0xff;
67 struct clock_event_device *ced = this_cpu_ptr(&numachip2_ced);
68
69 /* Setup IPI vector to local core and relative timing mode */
70 numachip2_write64_lcsr(NUMACHIP2_TIMER_INT + numachip2_timer(),
71 (3 << 22) | (X86_PLATFORM_IPI_VECTOR << 14) |
72 (local_apicid << 6));
73
74 *ced = numachip2_clockevent;
75 ced->cpumask = cpumask_of(smp_processor_id());
76 clockevents_register_device(ced);
77}
78
79static int __init numachip_timer_init(void)
80{
81 if (numachip_system != 2)
82 return -ENODEV;
83
84 /* Reset timer */
85 numachip2_write64_lcsr(NUMACHIP2_TIMER_RESET, 0);
86 clocksource_register_hz(&numachip2_clocksource, NSEC_PER_SEC);
87
88 /* Setup per-cpu clockevents */
89 x86_platform_ipi_callback = numachip_timer_interrupt;
90 schedule_on_each_cpu(&numachip_timer_each);
91
92 return 0;
93}
94
95arch_initcall(numachip_timer_init);
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index bc90e13338cc..9502bc4c3f6d 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs)
307 samsung_time_start(pwm.source_id, true); 307 samsung_time_start(pwm.source_id, true);
308} 308}
309 309
310static cycle_t samsung_clocksource_read(struct clocksource *c) 310static cycle_t notrace samsung_clocksource_read(struct clocksource *c)
311{ 311{
312 return ~readl_relaxed(pwm.source_reg); 312 return ~readl_relaxed(pwm.source_reg);
313} 313}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index ba73a6eb8d66..103c49362c68 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -962,7 +962,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
962 unsigned int i; 962 unsigned int i;
963 int ret; 963 int ret;
964 964
965 memset(cmt, 0, sizeof(*cmt));
966 cmt->pdev = pdev; 965 cmt->pdev = pdev;
967 raw_spin_lock_init(&cmt->lock); 966 raw_spin_lock_init(&cmt->lock);
968 967
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index f1985da8113f..53aa7e92a7d7 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -280,7 +280,9 @@ static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
280{ 280{
281 struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced); 281 struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
282 282
283 sh_mtu2_disable(ch); 283 if (clockevent_state_periodic(ced))
284 sh_mtu2_disable(ch);
285
284 return 0; 286 return 0;
285} 287}
286 288
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
new file mode 100644
index 000000000000..d297b30d2bc0
--- /dev/null
+++ b/drivers/clocksource/tango_xtal.c
@@ -0,0 +1,66 @@
1#include <linux/clocksource.h>
2#include <linux/sched_clock.h>
3#include <linux/of_address.h>
4#include <linux/printk.h>
5#include <linux/delay.h>
6#include <linux/init.h>
7#include <linux/clk.h>
8
9static void __iomem *xtal_in_cnt;
10static struct delay_timer delay_timer;
11
12static unsigned long notrace read_xtal_counter(void)
13{
14 return readl_relaxed(xtal_in_cnt);
15}
16
17static u64 notrace read_sched_clock(void)
18{
19 return read_xtal_counter();
20}
21
22static cycle_t read_clocksource(struct clocksource *cs)
23{
24 return read_xtal_counter();
25}
26
27static struct clocksource tango_xtal = {
28 .name = "tango-xtal",
29 .rating = 350,
30 .read = read_clocksource,
31 .mask = CLOCKSOURCE_MASK(32),
32 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
33};
34
35static void __init tango_clocksource_init(struct device_node *np)
36{
37 struct clk *clk;
38 int xtal_freq, ret;
39
40 xtal_in_cnt = of_iomap(np, 0);
41 if (xtal_in_cnt == NULL) {
42 pr_err("%s: invalid address\n", np->full_name);
43 return;
44 }
45
46 clk = of_clk_get(np, 0);
47 if (IS_ERR(clk)) {
48 pr_err("%s: invalid clock\n", np->full_name);
49 return;
50 }
51
52 xtal_freq = clk_get_rate(clk);
53 delay_timer.freq = xtal_freq;
54 delay_timer.read_current_timer = read_xtal_counter;
55
56 ret = clocksource_register_hz(&tango_xtal, xtal_freq);
57 if (ret != 0) {
58 pr_err("%s: registration failed\n", np->full_name);
59 return;
60 }
61
62 sched_clock_register(read_sched_clock, 32, xtal_freq);
63 register_current_timer_delay(&delay_timer);
64}
65
66CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 2162796fd504..d93ec3c4f139 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -45,6 +45,8 @@
45#include <linux/percpu.h> 45#include <linux/percpu.h>
46#include <linux/syscore_ops.h> 46#include <linux/syscore_ops.h>
47 47
48#include <asm/delay.h>
49
48/* 50/*
49 * Timer block registers. 51 * Timer block registers.
50 */ 52 */
@@ -249,6 +251,15 @@ struct syscore_ops armada_370_xp_timer_syscore_ops = {
249 .resume = armada_370_xp_timer_resume, 251 .resume = armada_370_xp_timer_resume,
250}; 252};
251 253
254static unsigned long armada_370_delay_timer_read(void)
255{
256 return ~readl(timer_base + TIMER0_VAL_OFF);
257}
258
259static struct delay_timer armada_370_delay_timer = {
260 .read_current_timer = armada_370_delay_timer_read,
261};
262
252static void __init armada_370_xp_timer_common_init(struct device_node *np) 263static void __init armada_370_xp_timer_common_init(struct device_node *np)
253{ 264{
254 u32 clr = 0, set = 0; 265 u32 clr = 0, set = 0;
@@ -287,6 +298,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
287 TIMER0_RELOAD_EN | enable_mask, 298 TIMER0_RELOAD_EN | enable_mask,
288 TIMER0_RELOAD_EN | enable_mask); 299 TIMER0_RELOAD_EN | enable_mask);
289 300
301 armada_370_delay_timer.freq = timer_clk;
302 register_current_timer_delay(&armada_370_delay_timer);
303
290 /* 304 /*
291 * Set scale and timer for sched_clock. 305 * Set scale and timer for sched_clock.
292 */ 306 */
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 18d4266c2986..bba679900054 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -67,7 +67,8 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
67 writel(value, base + 0x20 * gpt_id + offset); 67 writel(value, base + 0x20 * gpt_id + offset);
68} 68}
69 69
70static cycle_t pistachio_clocksource_read_cycles(struct clocksource *cs) 70static cycle_t notrace
71pistachio_clocksource_read_cycles(struct clocksource *cs)
71{ 72{
72 struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); 73 struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
73 u32 counter, overflw; 74 u32 counter, overflw;
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index e73947f0f86d..a536eeb634d8 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -143,7 +143,7 @@ static irqreturn_t digicolor_timer_interrupt(int irq, void *dev_id)
143 return IRQ_HANDLED; 143 return IRQ_HANDLED;
144} 144}
145 145
146static u64 digicolor_timer_sched_read(void) 146static u64 notrace digicolor_timer_sched_read(void)
147{ 147{
148 return ~readl(dc_timer_dev.base + COUNT(TIMER_B)); 148 return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
149} 149}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 839aba92fc39..99ec96769dda 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -305,13 +305,14 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm)
305 struct irqaction *act = &imxtm->act; 305 struct irqaction *act = &imxtm->act;
306 306
307 ced->name = "mxc_timer1"; 307 ced->name = "mxc_timer1";
308 ced->features = CLOCK_EVT_FEAT_ONESHOT; 308 ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
309 ced->set_state_shutdown = mxc_shutdown; 309 ced->set_state_shutdown = mxc_shutdown;
310 ced->set_state_oneshot = mxc_set_oneshot; 310 ced->set_state_oneshot = mxc_set_oneshot;
311 ced->tick_resume = mxc_shutdown; 311 ced->tick_resume = mxc_shutdown;
312 ced->set_next_event = imxtm->gpt->set_next_event; 312 ced->set_next_event = imxtm->gpt->set_next_event;
313 ced->rating = 200; 313 ced->rating = 200;
314 ced->cpumask = cpumask_of(0); 314 ced->cpumask = cpumask_of(0);
315 ced->irq = imxtm->irq;
315 clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per), 316 clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
316 0xff, 0xfffffffe); 317 0xff, 0xfffffffe);
317 318
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index 78de982cc640..2854c663e8b5 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -73,7 +73,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
73} 73}
74 74
75/* read 64-bit timer counter */ 75/* read 64-bit timer counter */
76static cycle_t sirfsoc_timer_read(struct clocksource *cs) 76static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs)
77{ 77{
78 u64 cycles; 78 u64 cycles;
79 79
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index f07ba9932171..a0e6c68536a1 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -52,7 +52,7 @@ static inline void pit_irq_acknowledge(void)
52 __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); 52 __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
53} 53}
54 54
55static u64 pit_read_sched_clock(void) 55static u64 notrace pit_read_sched_clock(void)
56{ 56{
57 return ~__raw_readl(clksrc_base + PITCVAL); 57 return ~__raw_readl(clksrc_base + PITCVAL);
58} 58}
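
The recurring change in these clocksource hunks is the notrace annotation on sched_clock/clocksource read callbacks: the function tracer itself calls sched_clock() to timestamp events, so instrumenting the function that sched_clock() reads from can recurse. In the kernel, notrace is the attribute that exempts a function from such instrumentation; a small standalone illustration of the same attribute (the macro definition below is the common compiler-attribute form, shown for illustration):

    /* Standalone illustration of what the kernel's notrace annotation does:
     * no_instrument_function makes the compiler skip this function when the
     * build adds entry/exit instrumentation (e.g. -finstrument-functions). */
    #include <stdio.h>

    #define notrace __attribute__((no_instrument_function))

    static unsigned long long fake_counter = 1000;

    /* A timestamp source must not be traced, or a tracer that timestamps
     * its own events would re-enter this function. */
    static notrace unsigned long long read_sched_clock(void)
    {
            return fake_counter++;
    }

    int main(void)
    {
            unsigned long long t0 = read_sched_clock();
            unsigned long long t1 = read_sched_clock();

            printf("t0=%llu t1=%llu\n", t0, t1);
            return 0;
    }
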
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index ae3c5f3ce405..dbf53e08bdd1 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
12edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o 12edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
13edac_core-y += edac_module.o edac_device_sysfs.o 13edac_core-y += edac_module.o edac_device_sysfs.o
14 14
15edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o
16
15ifdef CONFIG_PCI 17ifdef CONFIG_PCI
16edac_core-y += edac_pci.o edac_pci_sysfs.o 18edac_core-y += edac_pci.o edac_pci_sysfs.o
17endif 19endif
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 23ef0917483c..929640981d8a 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -51,11 +51,9 @@ static const struct altr_sdram_prv_data c5_data = {
51 .ecc_irq_clr_mask = (CV_DRAMINTR_INTRCLR | CV_DRAMINTR_INTREN), 51 .ecc_irq_clr_mask = (CV_DRAMINTR_INTRCLR | CV_DRAMINTR_INTREN),
52 .ecc_cnt_rst_offset = CV_DRAMINTR_OFST, 52 .ecc_cnt_rst_offset = CV_DRAMINTR_OFST,
53 .ecc_cnt_rst_mask = CV_DRAMINTR_INTRCLR, 53 .ecc_cnt_rst_mask = CV_DRAMINTR_INTRCLR,
54#ifdef CONFIG_EDAC_DEBUG
55 .ce_ue_trgr_offset = CV_CTLCFG_OFST, 54 .ce_ue_trgr_offset = CV_CTLCFG_OFST,
56 .ce_set_mask = CV_CTLCFG_GEN_SB_ERR, 55 .ce_set_mask = CV_CTLCFG_GEN_SB_ERR,
57 .ue_set_mask = CV_CTLCFG_GEN_DB_ERR, 56 .ue_set_mask = CV_CTLCFG_GEN_DB_ERR,
58#endif
59}; 57};
60 58
61static const struct altr_sdram_prv_data a10_data = { 59static const struct altr_sdram_prv_data a10_data = {
@@ -72,11 +70,9 @@ static const struct altr_sdram_prv_data a10_data = {
72 .ecc_irq_clr_mask = (A10_INTSTAT_SBEERR | A10_INTSTAT_DBEERR), 70 .ecc_irq_clr_mask = (A10_INTSTAT_SBEERR | A10_INTSTAT_DBEERR),
73 .ecc_cnt_rst_offset = A10_ECCCTRL1_OFST, 71 .ecc_cnt_rst_offset = A10_ECCCTRL1_OFST,
74 .ecc_cnt_rst_mask = A10_ECC_CNT_RESET_MASK, 72 .ecc_cnt_rst_mask = A10_ECC_CNT_RESET_MASK,
75#ifdef CONFIG_EDAC_DEBUG
76 .ce_ue_trgr_offset = A10_DIAGINTTEST_OFST, 73 .ce_ue_trgr_offset = A10_DIAGINTTEST_OFST,
77 .ce_set_mask = A10_DIAGINT_TSERRA_MASK, 74 .ce_set_mask = A10_DIAGINT_TSERRA_MASK,
78 .ue_set_mask = A10_DIAGINT_TDERRA_MASK, 75 .ue_set_mask = A10_DIAGINT_TDERRA_MASK,
79#endif
80}; 76};
81 77
82static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) 78static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
@@ -116,7 +112,6 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
116 return IRQ_NONE; 112 return IRQ_NONE;
117} 113}
118 114
119#ifdef CONFIG_EDAC_DEBUG
120static ssize_t altr_sdr_mc_err_inject_write(struct file *file, 115static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
121 const char __user *data, 116 const char __user *data,
122 size_t count, loff_t *ppos) 117 size_t count, loff_t *ppos)
@@ -191,14 +186,15 @@ static const struct file_operations altr_sdr_mc_debug_inject_fops = {
191 186
192static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci) 187static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
193{ 188{
194 if (mci->debugfs) 189 if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
195 debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, 190 return;
196 &altr_sdr_mc_debug_inject_fops); 191
192 if (!mci->debugfs)
193 return;
194
195 edac_debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
196 &altr_sdr_mc_debug_inject_fops);
197} 197}
198#else
199static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
200{}
201#endif
202 198
203/* Get total memory size from Open Firmware DTB */ 199/* Get total memory size from Open Firmware DTB */
204static unsigned long get_total_mem(void) 200static unsigned long get_total_mem(void)
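
The altera hunks replace #ifdef CONFIG_EDAC_DEBUG blocks with a plain "if (!IS_ENABLED(CONFIG_EDAC_DEBUG)) return;" guard: the condition is a compile-time constant, so the debug path is still discarded when the option is off, but the code is always parsed and type-checked instead of rotting behind the preprocessor. A standalone sketch of the same constant-folding idea (IS_ENABLED() is a kernel helper, emulated here with a 0/1 macro):

    /* Standalone sketch: emulate the kernel's IS_ENABLED() pattern with a
     * plain 0/1 macro. Set EXAMPLE_DEBUG to 0 and the compiler folds the
     * guard to "always return", dropping the body -- yet the body still
     * has to compile, unlike an #ifdef'ed-out block. */
    #include <stdio.h>

    #define EXAMPLE_DEBUG 1   /* stand-in for IS_ENABLED(CONFIG_EDAC_DEBUG) */

    static void create_debug_nodes(const char *name)
    {
            if (!EXAMPLE_DEBUG)
                    return;          /* constant-false branch is eliminated */

            printf("creating debugfs node %s\n", name);
    }

    int main(void)
    {
            create_debug_nodes("inject_ctrl");
            return 0;
    }
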
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 7b64dc7c4eb7..953077d3e4f3 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -30,8 +30,7 @@
30#define CV_CTLCFG_GEN_SB_ERR 0x2000 30#define CV_CTLCFG_GEN_SB_ERR 0x2000
31#define CV_CTLCFG_GEN_DB_ERR 0x4000 31#define CV_CTLCFG_GEN_DB_ERR 0x4000
32 32
33#define CV_CTLCFG_ECC_AUTO_EN (CV_CTLCFG_ECC_EN | \ 33#define CV_CTLCFG_ECC_AUTO_EN (CV_CTLCFG_ECC_EN)
34 CV_CTLCFG_ECC_CORR_EN)
35 34
36/* SDRAM Controller Address Width Register */ 35/* SDRAM Controller Address Width Register */
37#define CV_DRAMADDRW_OFST 0x2C 36#define CV_DRAMADDRW_OFST 0x2C
@@ -181,13 +180,11 @@ struct altr_sdram_prv_data {
181 int ecc_irq_clr_mask; 180 int ecc_irq_clr_mask;
182 int ecc_cnt_rst_offset; 181 int ecc_cnt_rst_offset;
183 int ecc_cnt_rst_mask; 182 int ecc_cnt_rst_mask;
184#ifdef CONFIG_EDAC_DEBUG
185 struct edac_dev_sysfs_attribute *eccmgr_sysfs_attr; 183 struct edac_dev_sysfs_attribute *eccmgr_sysfs_attr;
186 int ecc_enable_mask; 184 int ecc_enable_mask;
187 int ce_set_mask; 185 int ce_set_mask;
188 int ue_set_mask; 186 int ue_set_mask;
189 int ce_ue_trgr_offset; 187 int ce_ue_trgr_offset;
190#endif
191}; 188};
192 189
193/* Altera SDRAM Memory Controller data */ 190/* Altera SDRAM Memory Controller data */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 73aea40a9c89..9eee13ef83a5 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -173,7 +173,7 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
173 * scan the scrub rate mapping table for a close or matching bandwidth value to 173 * scan the scrub rate mapping table for a close or matching bandwidth value to
174 * issue. If requested is too big, then use last maximum value found. 174 * issue. If requested is too big, then use last maximum value found.
175 */ 175 */
176static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) 176static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
177{ 177{
178 u32 scrubval; 178 u32 scrubval;
179 int i; 179 int i;
@@ -201,7 +201,14 @@ static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
201 201
202 scrubval = scrubrates[i].scrubval; 202 scrubval = scrubrates[i].scrubval;
203 203
204 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F); 204 if (pvt->fam == 0x15 && pvt->model == 0x60) {
205 f15h_select_dct(pvt, 0);
206 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
207 f15h_select_dct(pvt, 1);
208 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
209 } else {
210 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
211 }
205 212
206 if (scrubval) 213 if (scrubval)
207 return scrubrates[i].bandwidth; 214 return scrubrates[i].bandwidth;
@@ -217,11 +224,15 @@ static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
217 if (pvt->fam == 0xf) 224 if (pvt->fam == 0xf)
218 min_scrubrate = 0x0; 225 min_scrubrate = 0x0;
219 226
220 /* Erratum #505 */ 227 if (pvt->fam == 0x15) {
221 if (pvt->fam == 0x15 && pvt->model < 0x10) 228 /* Erratum #505 */
222 f15h_select_dct(pvt, 0); 229 if (pvt->model < 0x10)
230 f15h_select_dct(pvt, 0);
223 231
224 return __set_scrub_rate(pvt->F3, bw, min_scrubrate); 232 if (pvt->model == 0x60)
233 min_scrubrate = 0x6;
234 }
235 return __set_scrub_rate(pvt, bw, min_scrubrate);
225} 236}
226 237
227static int get_scrub_rate(struct mem_ctl_info *mci) 238static int get_scrub_rate(struct mem_ctl_info *mci)
@@ -230,11 +241,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
230 u32 scrubval = 0; 241 u32 scrubval = 0;
231 int i, retval = -EINVAL; 242 int i, retval = -EINVAL;
232 243
233 /* Erratum #505 */ 244 if (pvt->fam == 0x15) {
234 if (pvt->fam == 0x15 && pvt->model < 0x10) 245 /* Erratum #505 */
235 f15h_select_dct(pvt, 0); 246 if (pvt->model < 0x10)
247 f15h_select_dct(pvt, 0);
236 248
237 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); 249 if (pvt->model == 0x60)
250 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
251 } else
252 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
238 253
239 scrubval = scrubval & 0x001F; 254 scrubval = scrubval & 0x001F;
240 255
@@ -2770,7 +2785,7 @@ static int init_one_instance(struct pci_dev *F2)
2770 struct mem_ctl_info *mci = NULL; 2785 struct mem_ctl_info *mci = NULL;
2771 struct edac_mc_layer layers[2]; 2786 struct edac_mc_layer layers[2];
2772 int err = 0, ret; 2787 int err = 0, ret;
2773 u16 nid = amd_get_node_id(F2); 2788 u16 nid = amd_pci_dev_to_node_id(F2);
2774 2789
2775 ret = -ENOMEM; 2790 ret = -ENOMEM;
2776 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); 2791 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2860,7 +2875,7 @@ err_ret:
2860static int probe_one_instance(struct pci_dev *pdev, 2875static int probe_one_instance(struct pci_dev *pdev,
2861 const struct pci_device_id *mc_type) 2876 const struct pci_device_id *mc_type)
2862{ 2877{
2863 u16 nid = amd_get_node_id(pdev); 2878 u16 nid = amd_pci_dev_to_node_id(pdev);
2864 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2879 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2865 struct ecc_settings *s; 2880 struct ecc_settings *s;
2866 int ret = 0; 2881 int ret = 0;
@@ -2910,7 +2925,7 @@ static void remove_one_instance(struct pci_dev *pdev)
2910{ 2925{
2911 struct mem_ctl_info *mci; 2926 struct mem_ctl_info *mci;
2912 struct amd64_pvt *pvt; 2927 struct amd64_pvt *pvt;
2913 u16 nid = amd_get_node_id(pdev); 2928 u16 nid = amd_pci_dev_to_node_id(pdev);
2914 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2929 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2915 struct ecc_settings *s = ecc_stngs[nid]; 2930 struct ecc_settings *s = ecc_stngs[nid];
2916 2931
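
The amd64_edac scrub-rate hunks above move Family 15h model 0x60 to a per-DCT control register in function 2 (F15H_M60H_SCRCTRL): the driver selects DCT 0 and DCT 1 in turn and writes the same 5-bit scrub value to each, instead of the single F3 SCRCTRL write. The table lookup around it is unchanged: walk the (scrubval, bandwidth) table until the requested bandwidth is at least the table entry, clamping to the platform's minimum rate. A standalone sketch of that lookup; the table entries below are invented, not the real scrubrates[] contents:

    /* Standalone sketch of the scrub-rate table scan with made-up entries.
     * Entries run from highest to lowest bandwidth; the scan stops at the
     * first entry whose bandwidth does not exceed the request, and falls
     * back to the last ("off") entry if nothing matched. */
    #include <stdio.h>

    struct scrubrate {
            unsigned int scrubval;   /* 5-bit hardware encoding (illustrative) */
            unsigned int bandwidth;  /* relative bandwidth units (illustrative) */
    };

    static const struct scrubrate scrubrates[] = {
            { 0x01, 1600000 },
            { 0x02,  800000 },
            { 0x03,  400000 },
            { 0x04,  200000 },
            { 0x00,       0 },       /* fallback: scrubbing off */
    };

    static unsigned int pick_scrubval(unsigned int new_bw, unsigned int min_rate)
    {
            unsigned int i;

            for (i = 0; i < sizeof(scrubrates) / sizeof(scrubrates[0]) - 1; i++) {
                    /* Skip encodings below the platform's minimum rate. */
                    if (scrubrates[i].scrubval < min_rate)
                            continue;
                    if (scrubrates[i].bandwidth <= new_bw)
                            break;
            }
            /* If nothing matched, i now indexes the last (fallback) entry. */
            return scrubrates[i].scrubval;
    }

    int main(void)
    {
            printf("scrubval=0x%02x\n", pick_scrubval(500000, 0));
            return 0;
    }
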
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 4bdec752d330..c0f248f3aaf9 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -2,64 +2,10 @@
2 * AMD64 class Memory Controller kernel module 2 * AMD64 class Memory Controller kernel module
3 * 3 *
4 * Copyright (c) 2009 SoftwareBitMaker. 4 * Copyright (c) 2009 SoftwareBitMaker.
5 * Copyright (c) 2009 Advanced Micro Devices, Inc. 5 * Copyright (c) 2009-15 Advanced Micro Devices, Inc.
6 * 6 *
7 * This file may be distributed under the terms of the 7 * This file may be distributed under the terms of the
8 * GNU General Public License. 8 * GNU General Public License.
9 *
10 * Originally Written by Thayne Harbaugh
11 *
12 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
13 * - K8 CPU Revision D and greater support
14 *
15 * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
16 * - Module largely rewritten, with new (and hopefully correct)
17 * code for dealing with node and chip select interleaving,
18 * various code cleanup, and bug fixes
19 * - Added support for memory hoisting using DRAM hole address
20 * register
21 *
22 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
23 * -K8 Rev (1207) revision support added, required Revision
24 * specific mini-driver code to support Rev F as well as
25 * prior revisions
26 *
27 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
28 * -Family 10h revision support added. New PCI Device IDs,
29 * indicating new changes. Actual registers modified
30 * were slight, less than the Rev E to Rev F transition
31 * but changing the PCI Device ID was the proper thing to
32 * do, as it provides for almost automactic family
33 * detection. The mods to Rev F required more family
34 * information detection.
35 *
36 * Changes/Fixes by Borislav Petkov <bp@alien8.de>:
37 * - misc fixes and code cleanups
38 *
39 * This module is based on the following documents
40 * (available from http://www.amd.com/):
41 *
42 * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
43 * Opteron Processors
44 * AMD publication #: 26094
45 *` Revision: 3.26
46 *
47 * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
48 * Processors
49 * AMD publication #: 32559
50 * Revision: 3.00
51 * Issue Date: May 2006
52 *
53 * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
54 * Processors
55 * AMD publication #: 31116
56 * Revision: 3.00
57 * Issue Date: September 07, 2007
58 *
59 * Sections in the first 2 documents are no longer in sync with each other.
60 * The Family 10h BKDG was totally re-written from scratch with a new
61 * presentation model.
62 * Therefore, comments that refer to a Document section might be off.
63 */ 9 */
64 10
65#include <linux/module.h> 11#include <linux/module.h>
@@ -255,6 +201,8 @@
255 201
256#define DCT_SEL_HI 0x114 202#define DCT_SEL_HI 0x114
257 203
204#define F15H_M60H_SCRCTRL 0x1C8
205
258/* 206/*
259 * Function 3 - Misc Control 207 * Function 3 - Misc Control
260 */ 208 */
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
new file mode 100644
index 000000000000..54d2f668cb0a
--- /dev/null
+++ b/drivers/edac/debugfs.c
@@ -0,0 +1,163 @@
1#include "edac_module.h"
2
3static struct dentry *edac_debugfs;
4
5static ssize_t edac_fake_inject_write(struct file *file,
6 const char __user *data,
7 size_t count, loff_t *ppos)
8{
9 struct device *dev = file->private_data;
10 struct mem_ctl_info *mci = to_mci(dev);
11 static enum hw_event_mc_err_type type;
12 u16 errcount = mci->fake_inject_count;
13
14 if (!errcount)
15 errcount = 1;
16
17 type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
18 : HW_EVENT_ERR_CORRECTED;
19
20 printk(KERN_DEBUG
21 "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
22 errcount,
23 (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
24 errcount > 1 ? "s" : "",
25 mci->fake_inject_layer[0],
26 mci->fake_inject_layer[1],
27 mci->fake_inject_layer[2]
28 );
29 edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
30 mci->fake_inject_layer[0],
31 mci->fake_inject_layer[1],
32 mci->fake_inject_layer[2],
33 "FAKE ERROR", "for EDAC testing only");
34
35 return count;
36}
37
38static const struct file_operations debug_fake_inject_fops = {
39 .open = simple_open,
40 .write = edac_fake_inject_write,
41 .llseek = generic_file_llseek,
42};
43
44int __init edac_debugfs_init(void)
45{
46 edac_debugfs = debugfs_create_dir("edac", NULL);
47 if (IS_ERR(edac_debugfs)) {
48 edac_debugfs = NULL;
49 return -ENOMEM;
50 }
51 return 0;
52}
53
54void edac_debugfs_exit(void)
55{
56 debugfs_remove(edac_debugfs);
57}
58
59int edac_create_debugfs_nodes(struct mem_ctl_info *mci)
60{
61 struct dentry *d, *parent;
62 char name[80];
63 int i;
64
65 if (!edac_debugfs)
66 return -ENODEV;
67
68 d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
69 if (!d)
70 return -ENOMEM;
71 parent = d;
72
73 for (i = 0; i < mci->n_layers; i++) {
74 sprintf(name, "fake_inject_%s",
75 edac_layer_name[mci->layers[i].type]);
76 d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
77 &mci->fake_inject_layer[i]);
78 if (!d)
79 goto nomem;
80 }
81
82 d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
83 &mci->fake_inject_ue);
84 if (!d)
85 goto nomem;
86
87 d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
88 &mci->fake_inject_count);
89 if (!d)
90 goto nomem;
91
92 d = debugfs_create_file("fake_inject", S_IWUSR, parent,
93 &mci->dev,
94 &debug_fake_inject_fops);
95 if (!d)
96 goto nomem;
97
98 mci->debugfs = parent;
99 return 0;
100nomem:
101 edac_debugfs_remove_recursive(mci->debugfs);
102 return -ENOMEM;
103}
104
105/* Create a toplevel dir under EDAC's debugfs hierarchy */
106struct dentry *edac_debugfs_create_dir(const char *dirname)
107{
108 if (!edac_debugfs)
109 return NULL;
110
111 return debugfs_create_dir(dirname, edac_debugfs);
112}
113EXPORT_SYMBOL_GPL(edac_debugfs_create_dir);
114
115/* Create a toplevel dir under EDAC's debugfs hierarchy with parent @parent */
116struct dentry *
117edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent)
118{
119 return debugfs_create_dir(dirname, parent);
120}
121EXPORT_SYMBOL_GPL(edac_debugfs_create_dir_at);
122
123/*
124 * Create a file under EDAC's hierarchy or a sub-hierarchy:
125 *
126 * @name: file name
127 * @mode: file permissions
128 * @parent: parent dentry. If NULL, it becomes the toplevel EDAC dir
129 * @data: private data of caller
130 * @fops: file operations of this file
131 */
132struct dentry *
133edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
134 void *data, const struct file_operations *fops)
135{
136 if (!parent)
137 parent = edac_debugfs;
138
139 return debugfs_create_file(name, mode, parent, data, fops);
140}
141EXPORT_SYMBOL_GPL(edac_debugfs_create_file);
142
143/* Wrapper for debugfs_create_x8() */
144struct dentry *edac_debugfs_create_x8(const char *name, umode_t mode,
145 struct dentry *parent, u8 *value)
146{
147 if (!parent)
148 parent = edac_debugfs;
149
150 return debugfs_create_x8(name, mode, parent, value);
151}
152EXPORT_SYMBOL_GPL(edac_debugfs_create_x8);
153
154/* Wrapper for debugfs_create_x16() */
155struct dentry *edac_debugfs_create_x16(const char *name, umode_t mode,
156 struct dentry *parent, u16 *value)
157{
158 if (!parent)
159 parent = edac_debugfs;
160
161 return debugfs_create_x16(name, mode, parent, value);
162}
163EXPORT_SYMBOL_GPL(edac_debugfs_create_x16);
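
The new drivers/edac/debugfs.c centralizes the "edac" debugfs root and exports thin wrappers so individual drivers stop open-coding debugfs_create_*() against private top-level directories. A kernel-style sketch of how a driver would consume the wrappers; the wrapper signatures are the ones added above, while the "exampledrv" name, private struct and injection file_operations are hypothetical:

    /* Kernel-style sketch only: an imaginary exampledrv hanging its
     * injection knobs under the shared EDAC debugfs root. Everything
     * except the edac_debugfs_* calls is made up. */
    #include "edac_module.h"

    struct exampledrv_priv {
            struct dentry *dfs;
            u8  inject_channel;
            u16 inject_eccmask;
    };

    static int exampledrv_setup_debugfs(struct exampledrv_priv *priv,
                                        const struct file_operations *inject_fops,
                                        void *inject_data)
    {
            /* edac_debugfs_create_dir() parents the new directory under the
             * "edac" root created at module init. */
            priv->dfs = edac_debugfs_create_dir("exampledrv");
            if (!priv->dfs)
                    return -ENOMEM;

            edac_debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR,
                                   priv->dfs, &priv->inject_channel);
            edac_debugfs_create_x16("inject_eccmask", S_IRUGO | S_IWUSR,
                                    priv->dfs, &priv->inject_eccmask);
            edac_debugfs_create_file("inject_enable", S_IWUSR, priv->dfs,
                                     inject_data, inject_fops);
            return 0;
    }

    static void exampledrv_teardown_debugfs(struct exampledrv_priv *priv)
    {
            edac_debugfs_remove_recursive(priv->dfs);
    }
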
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index ad42587c3f4d..4861542163d7 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -94,6 +94,8 @@ do { \
94 94
95#define edac_dev_name(dev) (dev)->dev_name 95#define edac_dev_name(dev) (dev)->dev_name
96 96
97#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
98
97/* 99/*
98 * The following are the structures to provide for a generic 100 * The following are the structures to provide for a generic
99 * or abstract 'edac_device'. This set of structures and the 101 * or abstract 'edac_device'. This set of structures and the
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 943ed8cf71b9..77ecd6a4179a 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -1302,7 +1302,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1302 grain_bits = fls_long(e->grain) + 1; 1302 grain_bits = fls_long(e->grain) + 1;
1303 trace_mc_event(type, e->msg, e->label, e->error_count, 1303 trace_mc_event(type, e->msg, e->label, e->error_count,
1304 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, 1304 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
1305 PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page, 1305 (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
1306 grain_bits, e->syndrome, e->other_detail); 1306 grain_bits, e->syndrome, e->other_detail);
1307 1307
1308 edac_raw_mc_handle_error(type, mci, e); 1308 edac_raw_mc_handle_error(type, mci, e);
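
The trace_mc_event() fix above replaces PAGES_TO_MiB(pfn) | offset with (pfn << PAGE_SHIFT) | offset: the tracepoint wants a byte address, and a page frame number scaled to MiB both loses precision and collides with the page-offset bits being OR'd in. A small standalone check of the arithmetic, assuming 4 KiB pages and the usual PAGES_TO_MiB definition:

    /* Standalone demo of the address arithmetic fixed above, with 4 KiB
     * pages. Only pfn << PAGE_SHIFT leaves the low PAGE_SHIFT bits clear,
     * so OR'ing in the page offset yields the real byte address; the old
     * MiB-scaled value overlaps the offset bits instead. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGES_TO_MiB(p) ((p) >> (20 - PAGE_SHIFT))   /* pages -> MiB */

    int main(void)
    {
            uint64_t pfn = 0x12345, offset = 0xabc;

            uint64_t good = (pfn << PAGE_SHIFT) | offset;   /* byte address */
            uint64_t bad  = PAGES_TO_MiB(pfn) | offset;     /* what was traced before */

            printf("pfn=0x%llx offset=0x%llx\n",
                   (unsigned long long)pfn, (unsigned long long)offset);
            printf("byte address: 0x%llx\n", (unsigned long long)good);
            printf("old value:    0x%llx\n", (unsigned long long)bad);
            return 0;
    }
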
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 33df7d93c857..a75acea0f674 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -229,7 +229,7 @@ static ssize_t channel_dimm_label_show(struct device *dev,
229 if (!rank->dimm->label[0]) 229 if (!rank->dimm->label[0])
230 return 0; 230 return 0;
231 231
232 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 232 return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
233 rank->dimm->label); 233 rank->dimm->label);
234} 234}
235 235
@@ -240,14 +240,21 @@ static ssize_t channel_dimm_label_store(struct device *dev,
240 struct csrow_info *csrow = to_csrow(dev); 240 struct csrow_info *csrow = to_csrow(dev);
241 unsigned chan = to_channel(mattr); 241 unsigned chan = to_channel(mattr);
242 struct rank_info *rank = csrow->channels[chan]; 242 struct rank_info *rank = csrow->channels[chan];
243 size_t copy_count = count;
243 244
244 ssize_t max_size = 0; 245 if (count == 0)
246 return -EINVAL;
247
248 if (data[count - 1] == '\0' || data[count - 1] == '\n')
249 copy_count -= 1;
250
251 if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
252 return -EINVAL;
245 253
246 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 254 strncpy(rank->dimm->label, data, copy_count);
247 strncpy(rank->dimm->label, data, max_size); 255 rank->dimm->label[copy_count] = '\0';
248 rank->dimm->label[max_size] = '\0';
249 256
250 return max_size; 257 return count;
251} 258}
252 259
253/* show function for dynamic chX_ce_count attribute */ 260/* show function for dynamic chX_ce_count attribute */
@@ -485,7 +492,7 @@ static ssize_t dimmdev_label_show(struct device *dev,
485 if (!dimm->label[0]) 492 if (!dimm->label[0])
486 return 0; 493 return 0;
487 494
488 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label); 495 return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
489} 496}
490 497
491static ssize_t dimmdev_label_store(struct device *dev, 498static ssize_t dimmdev_label_store(struct device *dev,
@@ -494,14 +501,21 @@ static ssize_t dimmdev_label_store(struct device *dev,
494 size_t count) 501 size_t count)
495{ 502{
496 struct dimm_info *dimm = to_dimm(dev); 503 struct dimm_info *dimm = to_dimm(dev);
504 size_t copy_count = count;
497 505
498 ssize_t max_size = 0; 506 if (count == 0)
507 return -EINVAL;
499 508
500 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 509 if (data[count - 1] == '\0' || data[count - 1] == '\n')
501 strncpy(dimm->label, data, max_size); 510 copy_count -= 1;
502 dimm->label[max_size] = '\0';
503 511
504 return max_size; 512 if (copy_count == 0 || copy_count >= sizeof(dimm->label))
513 return -EINVAL;
514
515 strncpy(dimm->label, data, copy_count);
516 dimm->label[copy_count] = '\0';
517
518 return count;
505} 519}
506 520
507static ssize_t dimmdev_size_show(struct device *dev, 521static ssize_t dimmdev_size_show(struct device *dev,
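
Both label_store() rewrites above validate the sysfs input against the real size of the label buffer rather than the EDAC_MC_LABEL_LEN constant: trim one trailing '\n' or '\0', reject empty or oversized strings, copy, terminate, and return the full count so userspace sees a complete write. The same logic as a standalone function; the buffer size and test strings are illustrative:

    /* Standalone version of the label-store logic above. Returns the
     * number of input bytes consumed, or -EINVAL on bad input. */
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/types.h>

    #define LABEL_SIZE 32   /* stands in for sizeof(dimm->label) */

    static ssize_t label_store(char *label, const char *data, size_t count)
    {
            size_t copy_count = count;

            if (count == 0)
                    return -EINVAL;

            /* Drop one trailing terminator or newline, as sysfs writes
             * usually carry one. */
            if (data[count - 1] == '\0' || data[count - 1] == '\n')
                    copy_count -= 1;

            if (copy_count == 0 || copy_count >= LABEL_SIZE)
                    return -EINVAL;

            memcpy(label, data, copy_count);
            label[copy_count] = '\0';

            return count;    /* report the whole write as accepted */
    }

    int main(void)
    {
            char label[LABEL_SIZE];
            ssize_t ret;

            ret = label_store(label, "DIMM_A1\n", 8);
            printf("ret=%zd label=\"%s\"\n", ret, label);

            ret = label_store(label, "\n", 1);
            printf("ret=%zd (empty input rejected)\n", ret);
            return 0;
    }
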
@@ -785,47 +799,6 @@ static ssize_t mci_max_location_show(struct device *dev,
785 return p - data; 799 return p - data;
786} 800}
787 801
788#ifdef CONFIG_EDAC_DEBUG
789static ssize_t edac_fake_inject_write(struct file *file,
790 const char __user *data,
791 size_t count, loff_t *ppos)
792{
793 struct device *dev = file->private_data;
794 struct mem_ctl_info *mci = to_mci(dev);
795 static enum hw_event_mc_err_type type;
796 u16 errcount = mci->fake_inject_count;
797
798 if (!errcount)
799 errcount = 1;
800
801 type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
802 : HW_EVENT_ERR_CORRECTED;
803
804 printk(KERN_DEBUG
805 "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
806 errcount,
807 (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
808 errcount > 1 ? "s" : "",
809 mci->fake_inject_layer[0],
810 mci->fake_inject_layer[1],
811 mci->fake_inject_layer[2]
812 );
813 edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
814 mci->fake_inject_layer[0],
815 mci->fake_inject_layer[1],
816 mci->fake_inject_layer[2],
817 "FAKE ERROR", "for EDAC testing only");
818
819 return count;
820}
821
822static const struct file_operations debug_fake_inject_fops = {
823 .open = simple_open,
824 .write = edac_fake_inject_write,
825 .llseek = generic_file_llseek,
826};
827#endif
828
829/* default Control file */ 802/* default Control file */
830static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); 803static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
831 804
@@ -896,71 +869,6 @@ static struct device_type mci_attr_type = {
896 .release = mci_attr_release, 869 .release = mci_attr_release,
897}; 870};
898 871
899#ifdef CONFIG_EDAC_DEBUG
900static struct dentry *edac_debugfs;
901
902int __init edac_debugfs_init(void)
903{
904 edac_debugfs = debugfs_create_dir("edac", NULL);
905 if (IS_ERR(edac_debugfs)) {
906 edac_debugfs = NULL;
907 return -ENOMEM;
908 }
909 return 0;
910}
911
912void edac_debugfs_exit(void)
913{
914 debugfs_remove(edac_debugfs);
915}
916
917static int edac_create_debug_nodes(struct mem_ctl_info *mci)
918{
919 struct dentry *d, *parent;
920 char name[80];
921 int i;
922
923 if (!edac_debugfs)
924 return -ENODEV;
925
926 d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
927 if (!d)
928 return -ENOMEM;
929 parent = d;
930
931 for (i = 0; i < mci->n_layers; i++) {
932 sprintf(name, "fake_inject_%s",
933 edac_layer_name[mci->layers[i].type]);
934 d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
935 &mci->fake_inject_layer[i]);
936 if (!d)
937 goto nomem;
938 }
939
940 d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
941 &mci->fake_inject_ue);
942 if (!d)
943 goto nomem;
944
945 d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
946 &mci->fake_inject_count);
947 if (!d)
948 goto nomem;
949
950 d = debugfs_create_file("fake_inject", S_IWUSR, parent,
951 &mci->dev,
952 &debug_fake_inject_fops);
953 if (!d)
954 goto nomem;
955
956 mci->debugfs = parent;
957 return 0;
958nomem:
959 debugfs_remove(mci->debugfs);
960 return -ENOMEM;
961}
962#endif
963
964/* 872/*
965 * Create a new Memory Controller kobject instance, 873 * Create a new Memory Controller kobject instance,
966 * mc<id> under the 'mc' directory 874 * mc<id> under the 'mc' directory
@@ -1039,9 +947,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
1039 goto fail_unregister_dimm; 947 goto fail_unregister_dimm;
1040#endif 948#endif
1041 949
1042#ifdef CONFIG_EDAC_DEBUG 950 edac_create_debugfs_nodes(mci);
1043 edac_create_debug_nodes(mci);
1044#endif
1045 return 0; 951 return 0;
1046 952
1047fail_unregister_dimm: 953fail_unregister_dimm:
@@ -1070,7 +976,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1070 edac_dbg(0, "\n"); 976 edac_dbg(0, "\n");
1071 977
1072#ifdef CONFIG_EDAC_DEBUG 978#ifdef CONFIG_EDAC_DEBUG
1073 debugfs_remove(mci->debugfs); 979 edac_debugfs_remove_recursive(mci->debugfs);
1074#endif 980#endif
1075#ifdef CONFIG_EDAC_LEGACY_SYSFS 981#ifdef CONFIG_EDAC_LEGACY_SYSFS
1076 edac_delete_csrow_objects(mci); 982 edac_delete_csrow_objects(mci);
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 26ecc52e073d..b95a48fc723d 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -60,15 +60,39 @@ extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
60/* 60/*
61 * EDAC debugfs functions 61 * EDAC debugfs functions
62 */ 62 */
63
64#define edac_debugfs_remove_recursive debugfs_remove_recursive
65#define edac_debugfs_remove debugfs_remove
63#ifdef CONFIG_EDAC_DEBUG 66#ifdef CONFIG_EDAC_DEBUG
64int edac_debugfs_init(void); 67int edac_debugfs_init(void);
65void edac_debugfs_exit(void); 68void edac_debugfs_exit(void);
69int edac_create_debugfs_nodes(struct mem_ctl_info *mci);
70struct dentry *edac_debugfs_create_dir(const char *dirname);
71struct dentry *
72edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent);
73struct dentry *
74edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
75 void *data, const struct file_operations *fops);
76struct dentry *
77edac_debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value);
78struct dentry *
79edac_debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, u16 *value);
66#else 80#else
67static inline int edac_debugfs_init(void) 81static inline int edac_debugfs_init(void) { return -ENODEV; }
68{ 82static inline void edac_debugfs_exit(void) { }
69 return -ENODEV; 83static inline int edac_create_debugfs_nodes(struct mem_ctl_info *mci) { return 0; }
70} 84static inline struct dentry *edac_debugfs_create_dir(const char *dirname) { return NULL; }
71static inline void edac_debugfs_exit(void) {} 85static inline struct dentry *
86edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent) { return NULL; }
87static inline struct dentry *
88edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
89 void *data, const struct file_operations *fops) { return NULL; }
90static inline struct dentry *
91edac_debugfs_create_x8(const char *name, umode_t mode,
92 struct dentry *parent, u8 *value) { return NULL; }
93static inline struct dentry *
94edac_debugfs_create_x16(const char *name, umode_t mode,
95 struct dentry *parent, u16 *value) { return NULL; }
72#endif 96#endif
73 97
74/* 98/*
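
The edac_module.h hunk gives every debugfs helper a CONFIG_EDAC_DEBUG=n counterpart, either a macro alias onto the raw debugfs call or a static inline stub returning NULL/-ENODEV, so callers can invoke the edac_debugfs_* API unconditionally and never sprinkle #ifdefs. A condensed standalone sketch of that real-vs-stub header pattern; the feature macro and function names are illustrative:

    /* Standalone sketch of the real-vs-stub pattern used above. With
     * EXAMPLE_FEATURE defined callers get the real implementation; without
     * it they get inline stubs, and the call sites compile unchanged. */
    #include <stdio.h>

    #define EXAMPLE_FEATURE 1        /* comment out to build the stub side */

    #ifdef EXAMPLE_FEATURE
    static int feature_init(void)
    {
            printf("feature initialized\n");
            return 0;
    }
    static void *feature_create_node(const char *name)
    {
            printf("created node %s\n", name);
            return (void *)name;     /* placeholder handle */
    }
    #else
    static inline int feature_init(void) { return -1; }                   /* "-ENODEV" */
    static inline void *feature_create_node(const char *name) { return NULL; }
    #endif

    int main(void)
    {
            if (feature_init() == 0)
                    feature_create_node("inject_ctrl");
            return 0;
    }
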
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index b24681998740..e3fa4390f846 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -66,26 +66,6 @@ struct ghes_edac_dimm_fill {
66 unsigned count; 66 unsigned count;
67}; 67};
68 68
69char *memory_type[] = {
70 [MEM_EMPTY] = "EMPTY",
71 [MEM_RESERVED] = "RESERVED",
72 [MEM_UNKNOWN] = "UNKNOWN",
73 [MEM_FPM] = "FPM",
74 [MEM_EDO] = "EDO",
75 [MEM_BEDO] = "BEDO",
76 [MEM_SDR] = "SDR",
77 [MEM_RDR] = "RDR",
78 [MEM_DDR] = "DDR",
79 [MEM_RDDR] = "RDDR",
80 [MEM_RMBS] = "RMBS",
81 [MEM_DDR2] = "DDR2",
82 [MEM_FB_DDR2] = "FB_DDR2",
83 [MEM_RDDR2] = "RDDR2",
84 [MEM_XDR] = "XDR",
85 [MEM_DDR3] = "DDR3",
86 [MEM_RDDR3] = "RDDR3",
87};
88
89static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg) 69static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
90{ 70{
91 int *num_dimm = arg; 71 int *num_dimm = arg;
@@ -173,7 +153,7 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
173 153
174 if (dimm->nr_pages) { 154 if (dimm->nr_pages) {
175 edac_dbg(1, "DIMM%i: %s size = %d MB%s\n", 155 edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
176 dimm_fill->count, memory_type[dimm->mtype], 156 dimm_fill->count, edac_mem_types[dimm->mtype],
177 PAGES_TO_MiB(dimm->nr_pages), 157 PAGES_TO_MiB(dimm->nr_pages),
178 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : ""); 158 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
179 edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n", 159 edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
@@ -417,7 +397,7 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
417 "APEI location: %s %s", e->location, e->other_detail); 397 "APEI location: %s %s", e->location, e->other_detail);
418 trace_mc_event(type, e->msg, e->label, e->error_count, 398 trace_mc_event(type, e->msg, e->label, e->error_count,
419 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, 399 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
420 PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page, 400 (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
421 grain_bits, e->syndrome, pvt->detail_location); 401 grain_bits, e->syndrome, pvt->detail_location);
422 402
423 /* Report the error via EDAC API */ 403 /* Report the error via EDAC API */
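
The ghes_edac hunk drops the file-local memory_type[] strings in favor of the core's edac_mem_types[] table, removing a duplicate enum-to-string mapping that had to be kept in sync with enum mem_type by hand. The deleted code shows the usual designated-initializer pattern for such tables; a standalone miniature with only a few illustrative entries:

    /* Standalone miniature of the enum-indexed string table the hunk
     * deletes. Designated initializers keep each string attached to its
     * enumerator even if values are added or reordered later. */
    #include <stdio.h>

    enum mem_type { MEM_EMPTY, MEM_UNKNOWN, MEM_DDR3, MEM_RDDR3, MEM_TYPE_MAX };

    static const char * const mem_types[] = {
            [MEM_EMPTY]   = "EMPTY",
            [MEM_UNKNOWN] = "UNKNOWN",
            [MEM_DDR3]    = "DDR3",
            [MEM_RDDR3]   = "RDDR3",
    };

    int main(void)
    {
            enum mem_type t = MEM_DDR3;

            printf("DIMM type: %s\n", mem_types[t]);
            return 0;
    }
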
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index e9f8a393915a..40917775dca1 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -30,6 +30,7 @@
30#include <linux/debugfs.h> 30#include <linux/debugfs.h>
31 31
32#include "edac_core.h" 32#include "edac_core.h"
33#include "edac_module.h"
33 34
34/* register addresses */ 35/* register addresses */
35 36
@@ -966,25 +967,25 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
966 if (!i5100_debugfs) 967 if (!i5100_debugfs)
967 return -ENODEV; 968 return -ENODEV;
968 969
969 priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs); 970 priv->debugfs = edac_debugfs_create_dir_at(mci->bus->name, i5100_debugfs);
970 971
971 if (!priv->debugfs) 972 if (!priv->debugfs)
972 return -ENOMEM; 973 return -ENOMEM;
973 974
974 debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs, 975 edac_debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
975 &priv->inject_channel); 976 &priv->inject_channel);
976 debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs, 977 edac_debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
977 &priv->inject_hlinesel); 978 &priv->inject_hlinesel);
978 debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs, 979 edac_debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
979 &priv->inject_deviceptr1); 980 &priv->inject_deviceptr1);
980 debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs, 981 edac_debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
981 &priv->inject_deviceptr2); 982 &priv->inject_deviceptr2);
982 debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs, 983 edac_debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
983 &priv->inject_eccmask1); 984 &priv->inject_eccmask1);
984 debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs, 985 edac_debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
985 &priv->inject_eccmask2); 986 &priv->inject_eccmask2);
986 debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs, 987 edac_debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
987 &mci->dev, &i5100_inject_enable_fops); 988 &mci->dev, &i5100_inject_enable_fops);
988 989
989 return 0; 990 return 0;
990 991
@@ -1189,7 +1190,7 @@ static void i5100_remove_one(struct pci_dev *pdev)
1189 1190
1190 priv = mci->pvt_info; 1191 priv = mci->pvt_info;
1191 1192
1192 debugfs_remove_recursive(priv->debugfs); 1193 edac_debugfs_remove_recursive(priv->debugfs);
1193 1194
1194 priv->scrub_enable = 0; 1195 priv->scrub_enable = 0;
1195 cancel_delayed_work_sync(&(priv->i5100_scrubbing)); 1196 cancel_delayed_work_sync(&(priv->i5100_scrubbing));
@@ -1223,7 +1224,7 @@ static int __init i5100_init(void)
1223{ 1224{
1224 int pci_rc; 1225 int pci_rc;
1225 1226
1226 i5100_debugfs = debugfs_create_dir("i5100_edac", NULL); 1227 i5100_debugfs = edac_debugfs_create_dir_at("i5100_edac", NULL);
1227 1228
1228 pci_rc = pci_register_driver(&i5100_driver); 1229 pci_rc = pci_register_driver(&i5100_driver);
1229 return (pci_rc < 0) ? pci_rc : 0; 1230 return (pci_rc < 0) ? pci_rc : 0;
@@ -1231,7 +1232,7 @@ static int __init i5100_init(void)
1231 1232
1232static void __exit i5100_exit(void) 1233static void __exit i5100_exit(void)
1233{ 1234{
1234 debugfs_remove(i5100_debugfs); 1235 edac_debugfs_remove(i5100_debugfs);
1235 1236
1236 pci_unregister_driver(&i5100_driver); 1237 pci_unregister_driver(&i5100_driver);
1237} 1238}
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 711d8ad74f11..d3a64ba61fa3 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -199,6 +199,7 @@ static const struct of_device_id ppc4xx_edac_match[] = {
199 }, 199 },
200 { } 200 { }
201}; 201};
202MODULE_DEVICE_TABLE(of, ppc4xx_edac_match);
202 203
203static struct platform_driver ppc4xx_edac_driver = { 204static struct platform_driver ppc4xx_edac_driver = {
204 .probe = ppc4xx_edac_probe, 205 .probe = ppc4xx_edac_probe,
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index cf1268ddef0c..429309c62699 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1688,6 +1688,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
1688{ 1688{
1689 struct sbridge_pvt *pvt = mci->pvt_info; 1689 struct sbridge_pvt *pvt = mci->pvt_info;
1690 struct pci_dev *pdev; 1690 struct pci_dev *pdev;
1691 u8 saw_chan_mask = 0;
1691 int i; 1692 int i;
1692 1693
1693 for (i = 0; i < sbridge_dev->n_devs; i++) { 1694 for (i = 0; i < sbridge_dev->n_devs; i++) {
@@ -1721,6 +1722,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
1721 { 1722 {
1722 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0; 1723 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
1723 pvt->pci_tad[id] = pdev; 1724 pvt->pci_tad[id] = pdev;
1725 saw_chan_mask |= 1 << id;
1724 } 1726 }
1725 break; 1727 break;
1726 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO: 1728 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
@@ -1741,10 +1743,8 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
1741 !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta) 1743 !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta)
1742 goto enodev; 1744 goto enodev;
1743 1745
1744 for (i = 0; i < NUM_CHANNELS; i++) { 1746 if (saw_chan_mask != 0x0f)
1745 if (!pvt->pci_tad[i]) 1747 goto enodev;
1746 goto enodev;
1747 }
1748 return 0; 1748 return 0;
1749 1749
1750enodev: 1750enodev:
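
The sb_edac hunk records each TAD channel device in a bitmask as it is discovered and then requires saw_chan_mask == 0x0f (all four channels seen), replacing the after-the-fact loop over pci_tad[]. A standalone sketch of the presence check; the device IDs below are placeholders, not the real PCI IDs:

    /* Standalone sketch of the presence check above: set one bit per
     * discovered channel device, then compare against the mask meaning
     * "all four TAD channels found". */
    #include <stdio.h>

    #define NUM_CHANNELS   4
    #define ALL_CHANNELS   0x0f
    #define TAD0_DEVICE_ID 0x3ca8u   /* illustrative base ID only */

    static int bind_devices(const unsigned int *ids, int n)
    {
            unsigned char saw_chan_mask = 0;
            int i;

            for (i = 0; i < n; i++) {
                    int id = (int)(ids[i] - TAD0_DEVICE_ID);   /* TAD0..TAD3 -> 0..3 */

                    if (id >= 0 && id < NUM_CHANNELS)
                            saw_chan_mask |= 1 << id;
            }

            if (saw_chan_mask != ALL_CHANNELS)
                    return -1;       /* at least one channel device missing */
            return 0;
    }

    int main(void)
    {
            unsigned int found[]   = { 0x3ca8, 0x3ca9, 0x3caa, 0x3cab };
            unsigned int partial[] = { 0x3ca8, 0x3caa };

            printf("all four: %s\n", bind_devices(found, 4)   ? "missing" : "ok");
            printf("only two: %s\n", bind_devices(partial, 2) ? "missing" : "ok");
            return 0;
    }
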
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index ba06904af2e1..41f876414a18 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -29,6 +29,7 @@
29#include <linux/regmap.h> 29#include <linux/regmap.h>
30 30
31#include "edac_core.h" 31#include "edac_core.h"
32#include "edac_module.h"
32 33
33#define EDAC_MOD_STR "xgene_edac" 34#define EDAC_MOD_STR "xgene_edac"
34 35
@@ -62,10 +63,12 @@ struct xgene_edac {
62 struct regmap *efuse_map; 63 struct regmap *efuse_map;
63 void __iomem *pcp_csr; 64 void __iomem *pcp_csr;
64 spinlock_t lock; 65 spinlock_t lock;
65 struct dentry *dfs; 66 struct dentry *dfs;
66 67
67 struct list_head mcus; 68 struct list_head mcus;
68 struct list_head pmds; 69 struct list_head pmds;
70 struct list_head l3s;
71 struct list_head socs;
69 72
70 struct mutex mc_lock; 73 struct mutex mc_lock;
71 int mc_active_mask; 74 int mc_active_mask;
@@ -172,12 +175,12 @@ static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci)
172{ 175{
173 if (!IS_ENABLED(CONFIG_EDAC_DEBUG)) 176 if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
174 return; 177 return;
175#ifdef CONFIG_EDAC_DEBUG 178
176 if (!mci->debugfs) 179 if (!mci->debugfs)
177 return; 180 return;
178 debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, 181
179 &xgene_edac_mc_debug_inject_fops); 182 edac_debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
180#endif 183 &xgene_edac_mc_debug_inject_fops);
181} 184}
182 185
183static void xgene_edac_mc_check(struct mem_ctl_info *mci) 186static void xgene_edac_mc_check(struct mem_ctl_info *mci)
@@ -536,140 +539,134 @@ static void xgene_edac_pmd_l1_check(struct edac_device_ctl_info *edac_dev,
536 pg_f = ctx->pmd_csr + cpu_idx * CPU_CSR_STRIDE + CPU_MEMERR_CPU_PAGE; 539 pg_f = ctx->pmd_csr + cpu_idx * CPU_CSR_STRIDE + CPU_MEMERR_CPU_PAGE;
537 540
538 val = readl(pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET); 541 val = readl(pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
539 if (val) { 542 if (!val)
540 dev_err(edac_dev->dev, 543 goto chk_lsu;
541 "CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n", 544 dev_err(edac_dev->dev,
542 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, 545 "CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
543 MEMERR_CPU_ICFESR_ERRWAY_RD(val), 546 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
544 MEMERR_CPU_ICFESR_ERRINDEX_RD(val), 547 MEMERR_CPU_ICFESR_ERRWAY_RD(val),
545 MEMERR_CPU_ICFESR_ERRINFO_RD(val)); 548 MEMERR_CPU_ICFESR_ERRINDEX_RD(val),
546 if (val & MEMERR_CPU_ICFESR_CERR_MASK) 549 MEMERR_CPU_ICFESR_ERRINFO_RD(val));
547 dev_err(edac_dev->dev, 550 if (val & MEMERR_CPU_ICFESR_CERR_MASK)
548 "One or more correctable error\n"); 551 dev_err(edac_dev->dev, "One or more correctable error\n");
549 if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK) 552 if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK)
550 dev_err(edac_dev->dev, "Multiple correctable error\n"); 553 dev_err(edac_dev->dev, "Multiple correctable error\n");
551 switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) { 554 switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) {
552 case 1: 555 case 1:
553 dev_err(edac_dev->dev, "L1 TLB multiple hit\n"); 556 dev_err(edac_dev->dev, "L1 TLB multiple hit\n");
554 break; 557 break;
555 case 2: 558 case 2:
556 dev_err(edac_dev->dev, "Way select multiple hit\n"); 559 dev_err(edac_dev->dev, "Way select multiple hit\n");
557 break; 560 break;
558 case 3: 561 case 3:
559 dev_err(edac_dev->dev, "Physical tag parity error\n"); 562 dev_err(edac_dev->dev, "Physical tag parity error\n");
560 break; 563 break;
561 case 4: 564 case 4:
562 case 5: 565 case 5:
563 dev_err(edac_dev->dev, "L1 data parity error\n"); 566 dev_err(edac_dev->dev, "L1 data parity error\n");
564 break; 567 break;
565 case 6: 568 case 6:
566 dev_err(edac_dev->dev, "L1 pre-decode parity error\n"); 569 dev_err(edac_dev->dev, "L1 pre-decode parity error\n");
567 break; 570 break;
568 } 571 }
569 572
570 /* Clear any HW errors */ 573 /* Clear any HW errors */
571 writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET); 574 writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
572 575
573 if (val & (MEMERR_CPU_ICFESR_CERR_MASK | 576 if (val & (MEMERR_CPU_ICFESR_CERR_MASK |
574 MEMERR_CPU_ICFESR_MULTCERR_MASK)) 577 MEMERR_CPU_ICFESR_MULTCERR_MASK))
575 edac_device_handle_ce(edac_dev, 0, 0, 578 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
576 edac_dev->ctl_name);
577 }
578 579
580chk_lsu:
579 val = readl(pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET); 581 val = readl(pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
580 if (val) { 582 if (!val)
583 goto chk_mmu;
584 dev_err(edac_dev->dev,
585 "CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
586 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
587 MEMERR_CPU_LSUESR_ERRWAY_RD(val),
588 MEMERR_CPU_LSUESR_ERRINDEX_RD(val),
589 MEMERR_CPU_LSUESR_ERRINFO_RD(val));
590 if (val & MEMERR_CPU_LSUESR_CERR_MASK)
591 dev_err(edac_dev->dev, "One or more correctable error\n");
592 if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK)
593 dev_err(edac_dev->dev, "Multiple correctable error\n");
594 switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) {
595 case 0:
596 dev_err(edac_dev->dev, "Load tag error\n");
597 break;
598 case 1:
599 dev_err(edac_dev->dev, "Load data error\n");
600 break;
601 case 2:
602 dev_err(edac_dev->dev, "WSL multihit error\n");
603 break;
604 case 3:
605 dev_err(edac_dev->dev, "Store tag error\n");
606 break;
607 case 4:
581 dev_err(edac_dev->dev, 608 dev_err(edac_dev->dev,
582 "CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n", 609 "DTB multihit from load pipeline error\n");
583 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, 610 break;
584 MEMERR_CPU_LSUESR_ERRWAY_RD(val), 611 case 5:
585 MEMERR_CPU_LSUESR_ERRINDEX_RD(val), 612 dev_err(edac_dev->dev,
586 MEMERR_CPU_LSUESR_ERRINFO_RD(val)); 613 "DTB multihit from store pipeline error\n");
587 if (val & MEMERR_CPU_LSUESR_CERR_MASK) 614 break;
588 dev_err(edac_dev->dev, 615 }
589 "One or more correctable error\n");
590 if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK)
591 dev_err(edac_dev->dev, "Multiple correctable error\n");
592 switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) {
593 case 0:
594 dev_err(edac_dev->dev, "Load tag error\n");
595 break;
596 case 1:
597 dev_err(edac_dev->dev, "Load data error\n");
598 break;
599 case 2:
600 dev_err(edac_dev->dev, "WSL multihit error\n");
601 break;
602 case 3:
603 dev_err(edac_dev->dev, "Store tag error\n");
604 break;
605 case 4:
606 dev_err(edac_dev->dev,
607 "DTB multihit from load pipeline error\n");
608 break;
609 case 5:
610 dev_err(edac_dev->dev,
611 "DTB multihit from store pipeline error\n");
612 break;
613 }
614 616
615 /* Clear any HW errors */ 617 /* Clear any HW errors */
616 writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET); 618 writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
617 619
618 if (val & (MEMERR_CPU_LSUESR_CERR_MASK | 620 if (val & (MEMERR_CPU_LSUESR_CERR_MASK |
619 MEMERR_CPU_LSUESR_MULTCERR_MASK)) 621 MEMERR_CPU_LSUESR_MULTCERR_MASK))
620 edac_device_handle_ce(edac_dev, 0, 0, 622 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
621 edac_dev->ctl_name);
622 }
623 623
624chk_mmu:
624 val = readl(pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET); 625 val = readl(pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
625 if (val) { 626 if (!val)
626 dev_err(edac_dev->dev, 627 return;
627 "CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n", 628 dev_err(edac_dev->dev,
628 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, 629 "CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n",
629 MEMERR_CPU_MMUESR_ERRWAY_RD(val), 630 ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
630 MEMERR_CPU_MMUESR_ERRINDEX_RD(val), 631 MEMERR_CPU_MMUESR_ERRWAY_RD(val),
631 MEMERR_CPU_MMUESR_ERRINFO_RD(val), 632 MEMERR_CPU_MMUESR_ERRINDEX_RD(val),
632 val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" : 633 MEMERR_CPU_MMUESR_ERRINFO_RD(val),
633 "ICF"); 634 val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" : "ICF");
634 if (val & MEMERR_CPU_MMUESR_CERR_MASK) 635 if (val & MEMERR_CPU_MMUESR_CERR_MASK)
635 dev_err(edac_dev->dev, 636 dev_err(edac_dev->dev, "One or more correctable error\n");
636 "One or more correctable error\n"); 637 if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK)
637 if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK) 638 dev_err(edac_dev->dev, "Multiple correctable error\n");
638 dev_err(edac_dev->dev, "Multiple correctable error\n"); 639 switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) {
639 switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) { 640 case 0:
640 case 0: 641 dev_err(edac_dev->dev, "Stage 1 UTB hit error\n");
641 dev_err(edac_dev->dev, "Stage 1 UTB hit error\n"); 642 break;
642 break; 643 case 1:
643 case 1: 644 dev_err(edac_dev->dev, "Stage 1 UTB miss error\n");
644 dev_err(edac_dev->dev, "Stage 1 UTB miss error\n"); 645 break;
645 break; 646 case 2:
646 case 2: 647 dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n");
647 dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n"); 648 break;
648 break; 649 case 3:
649 case 3: 650 dev_err(edac_dev->dev, "TMO operation single bank error\n");
650 dev_err(edac_dev->dev, 651 break;
651 "TMO operation single bank error\n"); 652 case 4:
652 break; 653 dev_err(edac_dev->dev, "Stage 2 UTB error\n");
653 case 4: 654 break;
654 dev_err(edac_dev->dev, "Stage 2 UTB error\n"); 655 case 5:
655 break; 656 dev_err(edac_dev->dev, "Stage 2 UTB miss error\n");
656 case 5: 657 break;
657 dev_err(edac_dev->dev, "Stage 2 UTB miss error\n"); 658 case 6:
658 break; 659 dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n");
659 case 6: 660 break;
660 dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n"); 661 case 7:
661 break; 662 dev_err(edac_dev->dev, "TMO operation multiple bank error\n");
662 case 7: 663 break;
663 dev_err(edac_dev->dev, 664 }
664 "TMO operation multiple bank error\n");
665 break;
666 }
667 665
668 /* Clear any HW errors */ 666 /* Clear any HW errors */
669 writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET); 667 writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
670 668
671 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); 669 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
672 }
673} 670}
674 671
675static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev) 672static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev)
@@ -684,60 +681,56 @@ static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev)
684 /* Check L2 */ 681 /* Check L2 */
685 pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE; 682 pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
686 val = readl(pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET); 683 val = readl(pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
687 if (val) { 684 if (!val)
688 val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET); 685 goto chk_l2c;
689 val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET); 686 val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET);
690 dev_err(edac_dev->dev, 687 val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET);
691 "PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n", 688 dev_err(edac_dev->dev,
692 ctx->pmd, val, val_hi, val_lo); 689 "PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n",
693 dev_err(edac_dev->dev, 690 ctx->pmd, val, val_hi, val_lo);
694 "ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n", 691 dev_err(edac_dev->dev,
695 MEMERR_L2C_L2ESR_ERRSYN_RD(val), 692 "ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n",
696 MEMERR_L2C_L2ESR_ERRWAY_RD(val), 693 MEMERR_L2C_L2ESR_ERRSYN_RD(val),
697 MEMERR_L2C_L2ESR_ERRCPU_RD(val), 694 MEMERR_L2C_L2ESR_ERRWAY_RD(val),
698 MEMERR_L2C_L2ESR_ERRGROUP_RD(val), 695 MEMERR_L2C_L2ESR_ERRCPU_RD(val),
699 MEMERR_L2C_L2ESR_ERRACTION_RD(val)); 696 MEMERR_L2C_L2ESR_ERRGROUP_RD(val),
700 697 MEMERR_L2C_L2ESR_ERRACTION_RD(val));
701 if (val & MEMERR_L2C_L2ESR_ERR_MASK) 698
702 dev_err(edac_dev->dev, 699 if (val & MEMERR_L2C_L2ESR_ERR_MASK)
703 "One or more correctable error\n"); 700 dev_err(edac_dev->dev, "One or more correctable error\n");
704 if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK) 701 if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK)
705 dev_err(edac_dev->dev, "Multiple correctable error\n"); 702 dev_err(edac_dev->dev, "Multiple correctable error\n");
706 if (val & MEMERR_L2C_L2ESR_UCERR_MASK) 703 if (val & MEMERR_L2C_L2ESR_UCERR_MASK)
707 dev_err(edac_dev->dev, 704 dev_err(edac_dev->dev, "One or more uncorrectable error\n");
708 "One or more uncorrectable error\n"); 705 if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK)
709 if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK) 706 dev_err(edac_dev->dev, "Multiple uncorrectable error\n");
710 dev_err(edac_dev->dev, 707
711 "Multiple uncorrectable error\n"); 708 switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) {
712 709 case 0:
713 switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) { 710 dev_err(edac_dev->dev, "Outbound SDB parity error\n");
714 case 0: 711 break;
715 dev_err(edac_dev->dev, "Outbound SDB parity error\n"); 712 case 1:
716 break; 713 dev_err(edac_dev->dev, "Inbound SDB parity error\n");
717 case 1: 714 break;
718 dev_err(edac_dev->dev, "Inbound SDB parity error\n"); 715 case 2:
719 break; 716 dev_err(edac_dev->dev, "Tag ECC error\n");
720 case 2: 717 break;
721 dev_err(edac_dev->dev, "Tag ECC error\n"); 718 case 3:
722 break; 719 dev_err(edac_dev->dev, "Data ECC error\n");
723 case 3: 720 break;
724 dev_err(edac_dev->dev, "Data ECC error\n"); 721 }
725 break;
726 }
727 722
728 /* Clear any HW errors */ 723 /* Clear any HW errors */
729 writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET); 724 writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
730 725
731 if (val & (MEMERR_L2C_L2ESR_ERR_MASK | 726 if (val & (MEMERR_L2C_L2ESR_ERR_MASK |
732 MEMERR_L2C_L2ESR_MULTICERR_MASK)) 727 MEMERR_L2C_L2ESR_MULTICERR_MASK))
733 edac_device_handle_ce(edac_dev, 0, 0, 728 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
734 edac_dev->ctl_name); 729 if (val & (MEMERR_L2C_L2ESR_UCERR_MASK |
735 if (val & (MEMERR_L2C_L2ESR_UCERR_MASK | 730 MEMERR_L2C_L2ESR_MULTUCERR_MASK))
736 MEMERR_L2C_L2ESR_MULTUCERR_MASK)) 731 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
737 edac_device_handle_ue(edac_dev, 0, 0,
738 edac_dev->ctl_name);
739 }
740 732
733chk_l2c:
741 /* Check if any memory request timed out on L2 cache */ 734 /* Check if any memory request timed out on L2 cache */
742 pg_d = ctx->pmd_csr + CPU_L2C_PAGE; 735 pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
743 val = readl(pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET); 736 val = readl(pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
@@ -877,35 +870,25 @@ static const struct file_operations xgene_edac_pmd_debug_inject_fops[] = {
877 { } 870 { }
878}; 871};
879 872
880static void xgene_edac_pmd_create_debugfs_nodes( 873static void
881 struct edac_device_ctl_info *edac_dev) 874xgene_edac_pmd_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
882{ 875{
883 struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info; 876 struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
884 struct dentry *edac_debugfs; 877 struct dentry *dbgfs_dir;
885 char name[30]; 878 char name[10];
886 879
887 if (!IS_ENABLED(CONFIG_EDAC_DEBUG)) 880 if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
888 return; 881 return;
889 882
890 /* 883 snprintf(name, sizeof(name), "PMD%d", ctx->pmd);
891 * Todo: Switch to common EDAC debug file system for edac device 884 dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
892 * when available. 885 if (!dbgfs_dir)
893 */
894 if (!ctx->edac->dfs) {
895 ctx->edac->dfs = debugfs_create_dir(edac_dev->dev->kobj.name,
896 NULL);
897 if (!ctx->edac->dfs)
898 return;
899 }
900 sprintf(name, "PMD%d", ctx->pmd);
901 edac_debugfs = debugfs_create_dir(name, ctx->edac->dfs);
902 if (!edac_debugfs)
903 return; 886 return;
904 887
905 debugfs_create_file("l1_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev, 888 edac_debugfs_create_file("l1_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
906 &xgene_edac_pmd_debug_inject_fops[0]); 889 &xgene_edac_pmd_debug_inject_fops[0]);
907 debugfs_create_file("l2_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev, 890 edac_debugfs_create_file("l2_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
908 &xgene_edac_pmd_debug_inject_fops[1]); 891 &xgene_edac_pmd_debug_inject_fops[1]);
909} 892}
910 893
911static int xgene_edac_pmd_available(u32 efuse, int pmd) 894static int xgene_edac_pmd_available(u32 efuse, int pmd)
@@ -941,7 +924,7 @@ static int xgene_edac_pmd_add(struct xgene_edac *edac, struct device_node *np,
941 goto err_group; 924 goto err_group;
942 } 925 }
943 926
944 sprintf(edac_name, "l2c%d", pmd); 927 snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd);
945 edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx), 928 edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
946 edac_name, 1, "l2c", 1, 2, NULL, 929 edac_name, 1, "l2c", 1, 2, NULL,
947 0, edac_device_alloc_index()); 930 0, edac_device_alloc_index());
@@ -1016,10 +999,780 @@ static int xgene_edac_pmd_remove(struct xgene_edac_pmd_ctx *pmd)
1016 return 0; 999 return 0;
1017} 1000}
1018 1001
1002/* L3 Error device */
1003#define L3C_ESR (0x0A * 4)
1004#define L3C_ESR_DATATAG_MASK BIT(9)
1005#define L3C_ESR_MULTIHIT_MASK BIT(8)
1006#define L3C_ESR_UCEVICT_MASK BIT(6)
1007#define L3C_ESR_MULTIUCERR_MASK BIT(5)
1008#define L3C_ESR_MULTICERR_MASK BIT(4)
1009#define L3C_ESR_UCERR_MASK BIT(3)
1010#define L3C_ESR_CERR_MASK BIT(2)
1011#define L3C_ESR_UCERRINTR_MASK BIT(1)
1012#define L3C_ESR_CERRINTR_MASK BIT(0)
1013#define L3C_ECR (0x0B * 4)
1014#define L3C_ECR_UCINTREN BIT(3)
1015#define L3C_ECR_CINTREN BIT(2)
1016#define L3C_UCERREN BIT(1)
1017#define L3C_CERREN BIT(0)
1018#define L3C_ELR (0x0C * 4)
1019#define L3C_ELR_ERRSYN(src) ((src & 0xFF800000) >> 23)
1020#define L3C_ELR_ERRWAY(src) ((src & 0x007E0000) >> 17)
1021#define L3C_ELR_AGENTID(src) ((src & 0x0001E000) >> 13)
1022#define L3C_ELR_ERRGRP(src) ((src & 0x00000F00) >> 8)
1023#define L3C_ELR_OPTYPE(src) ((src & 0x000000F0) >> 4)
1024#define L3C_ELR_PADDRHIGH(src) (src & 0x0000000F)
1025#define L3C_AELR (0x0D * 4)
1026#define L3C_BELR (0x0E * 4)
1027#define L3C_BELR_BANK(src) (src & 0x0000000F)
1028
1029struct xgene_edac_dev_ctx {
1030 struct list_head next;
1031 struct device ddev;
1032 char *name;
1033 struct xgene_edac *edac;
1034 struct edac_device_ctl_info *edac_dev;
1035 int edac_idx;
1036 void __iomem *dev_csr;
1037 int version;
1038};
1039
1040/*
1041 * Version 1 of the L3 controller has broken single bit correctable logic for
1042 * certain error syndromes. Log them as uncorrectable in that case.
1043 */
1044static bool xgene_edac_l3_promote_to_uc_err(u32 l3cesr, u32 l3celr)
1045{
1046 if (l3cesr & L3C_ESR_DATATAG_MASK) {
1047 switch (L3C_ELR_ERRSYN(l3celr)) {
1048 case 0x13C:
1049 case 0x0B4:
1050 case 0x007:
1051 case 0x00D:
1052 case 0x00E:
1053 case 0x019:
1054 case 0x01A:
1055 case 0x01C:
1056 case 0x04E:
1057 case 0x041:
1058 return true;
1059 }
1060 } else if (L3C_ELR_ERRSYN(l3celr) == 9)
1061 return true;
1062
1063 return false;
1064}
1065
1066static void xgene_edac_l3_check(struct edac_device_ctl_info *edac_dev)
1067{
1068 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1069 u32 l3cesr;
1070 u32 l3celr;
1071 u32 l3caelr;
1072 u32 l3cbelr;
1073
1074 l3cesr = readl(ctx->dev_csr + L3C_ESR);
1075 if (!(l3cesr & (L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK)))
1076 return;
1077
1078 if (l3cesr & L3C_ESR_UCERR_MASK)
1079 dev_err(edac_dev->dev, "L3C uncorrectable error\n");
1080 if (l3cesr & L3C_ESR_CERR_MASK)
1081 dev_warn(edac_dev->dev, "L3C correctable error\n");
1082
1083 l3celr = readl(ctx->dev_csr + L3C_ELR);
1084 l3caelr = readl(ctx->dev_csr + L3C_AELR);
1085 l3cbelr = readl(ctx->dev_csr + L3C_BELR);
1086 if (l3cesr & L3C_ESR_MULTIHIT_MASK)
1087 dev_err(edac_dev->dev, "L3C multiple hit error\n");
1088 if (l3cesr & L3C_ESR_UCEVICT_MASK)
1089 dev_err(edac_dev->dev,
1090 "L3C dropped eviction of line with error\n");
1091 if (l3cesr & L3C_ESR_MULTIUCERR_MASK)
1092 dev_err(edac_dev->dev, "L3C multiple uncorrectable error\n");
1093 if (l3cesr & L3C_ESR_DATATAG_MASK)
1094 dev_err(edac_dev->dev,
1095 "L3C data error syndrome 0x%X group 0x%X\n",
1096 L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRGRP(l3celr));
1097 else
1098 dev_err(edac_dev->dev,
1099 "L3C tag error syndrome 0x%X Way of Tag 0x%X Agent ID 0x%X Operation type 0x%X\n",
1100 L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRWAY(l3celr),
1101 L3C_ELR_AGENTID(l3celr), L3C_ELR_OPTYPE(l3celr));
1102 /*
1103 * NOTE: Address [41:38] in L3C_ELR_PADDRHIGH(l3celr).
1104 * Address [37:6] in l3caelr. Lower 6 bits are zero.
1105 */
1106 dev_err(edac_dev->dev, "L3C error address 0x%08X.%08X bank %d\n",
1107 L3C_ELR_PADDRHIGH(l3celr) << 6 | (l3caelr >> 26),
1108 (l3caelr & 0x3FFFFFFF) << 6, L3C_BELR_BANK(l3cbelr));
1109 dev_err(edac_dev->dev,
1110 "L3C error status register value 0x%X\n", l3cesr);
1111
1112 /* Clear L3C error interrupt */
1113 writel(0, ctx->dev_csr + L3C_ESR);
1114
1115 if (ctx->version <= 1 &&
1116 xgene_edac_l3_promote_to_uc_err(l3cesr, l3celr)) {
1117 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
1118 return;
1119 }
1120 if (l3cesr & L3C_ESR_CERR_MASK)
1121 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
1122 if (l3cesr & L3C_ESR_UCERR_MASK)
1123 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
1124}
1125
1126static void xgene_edac_l3_hw_init(struct edac_device_ctl_info *edac_dev,
1127 bool enable)
1128{
1129 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1130 u32 val;
1131
1132 val = readl(ctx->dev_csr + L3C_ECR);
1133 val |= L3C_UCERREN | L3C_CERREN;
1134 /* On disable, we just disable interrupt but keep error enabled */
1135 if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
1136 if (enable)
1137 val |= L3C_ECR_UCINTREN | L3C_ECR_CINTREN;
1138 else
1139 val &= ~(L3C_ECR_UCINTREN | L3C_ECR_CINTREN);
1140 }
1141 writel(val, ctx->dev_csr + L3C_ECR);
1142
1143 if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
1144 /* Enable/disable L3 error top level interrupt */
1145 if (enable) {
1146 xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
1147 L3C_UNCORR_ERR_MASK);
1148 xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
1149 L3C_CORR_ERR_MASK);
1150 } else {
1151 xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
1152 L3C_UNCORR_ERR_MASK);
1153 xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
1154 L3C_CORR_ERR_MASK);
1155 }
1156 }
1157}
1158
1159static ssize_t xgene_edac_l3_inject_ctrl_write(struct file *file,
1160 const char __user *data,
1161 size_t count, loff_t *ppos)
1162{
1163 struct edac_device_ctl_info *edac_dev = file->private_data;
1164 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1165
1166 /* Generate all errors */
1167 writel(0xFFFFFFFF, ctx->dev_csr + L3C_ESR);
1168 return count;
1169}
1170
1171static const struct file_operations xgene_edac_l3_debug_inject_fops = {
1172 .open = simple_open,
1173 .write = xgene_edac_l3_inject_ctrl_write,
1174 .llseek = generic_file_llseek
1175};
1176
1177static void
1178xgene_edac_l3_create_debugfs_nodes(struct edac_device_ctl_info *edac_dev)
1179{
1180 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1181 struct dentry *dbgfs_dir;
1182 char name[10];
1183
1184 if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !ctx->edac->dfs)
1185 return;
1186
1187 snprintf(name, sizeof(name), "l3c%d", ctx->edac_idx);
1188 dbgfs_dir = edac_debugfs_create_dir_at(name, ctx->edac->dfs);
1189 if (!dbgfs_dir)
1190 return;
1191
1192 debugfs_create_file("l3_inject_ctrl", S_IWUSR, dbgfs_dir, edac_dev,
1193 &xgene_edac_l3_debug_inject_fops);
1194}
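When CONFIG_EDAC_DEBUG is enabled, the node created above can be used to exercise the error path by hand: writing anything to it sets every bit in L3C_ESR via xgene_edac_l3_inject_ctrl_write(), so the next polling or interrupt pass reports and then clears all L3C error types. The debugfs path is typically of the form <debugfs>/edac/<platform-device>/l3cN/l3_inject_ctrl, though the exact prefix depends on where the EDAC debugfs root is created.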
1195
1196static int xgene_edac_l3_add(struct xgene_edac *edac, struct device_node *np,
1197 int version)
1198{
1199 struct edac_device_ctl_info *edac_dev;
1200 struct xgene_edac_dev_ctx *ctx;
1201 struct resource res;
1202 void __iomem *dev_csr;
1203 int edac_idx;
1204 int rc = 0;
1205
1206 if (!devres_open_group(edac->dev, xgene_edac_l3_add, GFP_KERNEL))
1207 return -ENOMEM;
1208
1209 rc = of_address_to_resource(np, 0, &res);
1210 if (rc < 0) {
1211 dev_err(edac->dev, "no L3 resource address\n");
1212 goto err_release_group;
1213 }
1214 dev_csr = devm_ioremap_resource(edac->dev, &res);
1215 if (IS_ERR(dev_csr)) {
1216 dev_err(edac->dev,
1217 "devm_ioremap_resource failed for L3 resource address\n");
1218 rc = PTR_ERR(dev_csr);
1219 goto err_release_group;
1220 }
1221
1222 edac_idx = edac_device_alloc_index();
1223 edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
1224 "l3c", 1, "l3c", 1, 0, NULL, 0,
1225 edac_idx);
1226 if (!edac_dev) {
1227 rc = -ENOMEM;
1228 goto err_release_group;
1229 }
1230
1231 ctx = edac_dev->pvt_info;
1232 ctx->dev_csr = dev_csr;
1233 ctx->name = "xgene_l3_err";
1234 ctx->edac_idx = edac_idx;
1235 ctx->edac = edac;
1236 ctx->edac_dev = edac_dev;
1237 ctx->ddev = *edac->dev;
1238 ctx->version = version;
1239 edac_dev->dev = &ctx->ddev;
1240 edac_dev->ctl_name = ctx->name;
1241 edac_dev->dev_name = ctx->name;
1242 edac_dev->mod_name = EDAC_MOD_STR;
1243
1244 if (edac_op_state == EDAC_OPSTATE_POLL)
1245 edac_dev->edac_check = xgene_edac_l3_check;
1246
1247 xgene_edac_l3_create_debugfs_nodes(edac_dev);
1248
1249 rc = edac_device_add_device(edac_dev);
1250 if (rc > 0) {
1251 dev_err(edac->dev, "failed edac_device_add_device()\n");
1252 rc = -ENOMEM;
1253 goto err_ctl_free;
1254 }
1255
1256 if (edac_op_state == EDAC_OPSTATE_INT)
1257 edac_dev->op_state = OP_RUNNING_INTERRUPT;
1258
1259 list_add(&ctx->next, &edac->l3s);
1260
1261 xgene_edac_l3_hw_init(edac_dev, 1);
1262
1263 devres_remove_group(edac->dev, xgene_edac_l3_add);
1264
1265 dev_info(edac->dev, "X-Gene EDAC L3 registered\n");
1266 return 0;
1267
1268err_ctl_free:
1269 edac_device_free_ctl_info(edac_dev);
1270err_release_group:
1271 devres_release_group(edac->dev, xgene_edac_l3_add);
1272 return rc;
1273}
1274
1275static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
1276{
1277 struct edac_device_ctl_info *edac_dev = l3->edac_dev;
1278
1279 xgene_edac_l3_hw_init(edac_dev, 0);
1280 edac_device_del_device(l3->edac->dev);
1281 edac_device_free_ctl_info(edac_dev);
1282 return 0;
1283}
1284
1285/* SoC error device */
1286#define IOBAXIS0TRANSERRINTSTS 0x0000
1287#define IOBAXIS0_M_ILLEGAL_ACCESS_MASK BIT(1)
1288#define IOBAXIS0_ILLEGAL_ACCESS_MASK BIT(0)
1289#define IOBAXIS0TRANSERRINTMSK 0x0004
1290#define IOBAXIS0TRANSERRREQINFOL 0x0008
1291#define IOBAXIS0TRANSERRREQINFOH 0x000c
1292#define REQTYPE_RD(src) (((src) & BIT(0)))
1293#define ERRADDRH_RD(src) (((src) & 0xffc00000) >> 22)
1294#define IOBAXIS1TRANSERRINTSTS 0x0010
1295#define IOBAXIS1TRANSERRINTMSK 0x0014
1296#define IOBAXIS1TRANSERRREQINFOL 0x0018
1297#define IOBAXIS1TRANSERRREQINFOH 0x001c
1298#define IOBPATRANSERRINTSTS 0x0020
1299#define IOBPA_M_REQIDRAM_CORRUPT_MASK BIT(7)
1300#define IOBPA_REQIDRAM_CORRUPT_MASK BIT(6)
1301#define IOBPA_M_TRANS_CORRUPT_MASK BIT(5)
1302#define IOBPA_TRANS_CORRUPT_MASK BIT(4)
1303#define IOBPA_M_WDATA_CORRUPT_MASK BIT(3)
1304#define IOBPA_WDATA_CORRUPT_MASK BIT(2)
1305#define IOBPA_M_RDATA_CORRUPT_MASK BIT(1)
1306#define IOBPA_RDATA_CORRUPT_MASK BIT(0)
1307#define IOBBATRANSERRINTSTS 0x0030
1308#define M_ILLEGAL_ACCESS_MASK BIT(15)
1309#define ILLEGAL_ACCESS_MASK BIT(14)
1310#define M_WIDRAM_CORRUPT_MASK BIT(13)
1311#define WIDRAM_CORRUPT_MASK BIT(12)
1312#define M_RIDRAM_CORRUPT_MASK BIT(11)
1313#define RIDRAM_CORRUPT_MASK BIT(10)
1314#define M_TRANS_CORRUPT_MASK BIT(9)
1315#define TRANS_CORRUPT_MASK BIT(8)
1316#define M_WDATA_CORRUPT_MASK BIT(7)
1317#define WDATA_CORRUPT_MASK BIT(6)
1318#define M_RBM_POISONED_REQ_MASK BIT(5)
1319#define RBM_POISONED_REQ_MASK BIT(4)
1320#define M_XGIC_POISONED_REQ_MASK BIT(3)
1321#define XGIC_POISONED_REQ_MASK BIT(2)
1322#define M_WRERR_RESP_MASK BIT(1)
1323#define WRERR_RESP_MASK BIT(0)
1324#define IOBBATRANSERRREQINFOL 0x0038
1325#define IOBBATRANSERRREQINFOH 0x003c
1326#define REQTYPE_F2_RD(src) ((src) & BIT(0))
1327#define ERRADDRH_F2_RD(src) (((src) & 0xffc00000) >> 22)
1328#define IOBBATRANSERRCSWREQID 0x0040
1329#define XGICTRANSERRINTSTS 0x0050
1330#define M_WR_ACCESS_ERR_MASK BIT(3)
1331#define WR_ACCESS_ERR_MASK BIT(2)
1332#define M_RD_ACCESS_ERR_MASK BIT(1)
1333#define RD_ACCESS_ERR_MASK BIT(0)
1334#define XGICTRANSERRINTMSK 0x0054
1335#define XGICTRANSERRREQINFO 0x0058
1336#define REQTYPE_MASK BIT(26)
1337#define ERRADDR_RD(src) ((src) & 0x03ffffff)
1338#define GLBL_ERR_STS 0x0800
1339#define MDED_ERR_MASK BIT(3)
1340#define DED_ERR_MASK BIT(2)
1341#define MSEC_ERR_MASK BIT(1)
1342#define SEC_ERR_MASK BIT(0)
1343#define GLBL_SEC_ERRL 0x0810
1344#define GLBL_SEC_ERRH 0x0818
1345#define GLBL_MSEC_ERRL 0x0820
1346#define GLBL_MSEC_ERRH 0x0828
1347#define GLBL_DED_ERRL 0x0830
1348#define GLBL_DED_ERRLMASK 0x0834
1349#define GLBL_DED_ERRH 0x0838
1350#define GLBL_DED_ERRHMASK 0x083c
1351#define GLBL_MDED_ERRL 0x0840
1352#define GLBL_MDED_ERRLMASK 0x0844
1353#define GLBL_MDED_ERRH 0x0848
1354#define GLBL_MDED_ERRHMASK 0x084c
1355
1356static const char * const soc_mem_err_v1[] = {
1357 "10GbE0",
1358 "10GbE1",
1359 "Security",
1360 "SATA45",
1361 "SATA23/ETH23",
1362 "SATA01/ETH01",
1363 "USB1",
1364 "USB0",
1365 "QML",
1366 "QM0",
1367 "QM1 (XGbE01)",
1368 "PCIE4",
1369 "PCIE3",
1370 "PCIE2",
1371 "PCIE1",
1372 "PCIE0",
1373 "CTX Manager",
1374 "OCM",
1375 "1GbE",
1376 "CLE",
1377 "AHBC",
1378 "PktDMA",
1379 "GFC",
1380 "MSLIM",
1381 "10GbE2",
1382 "10GbE3",
1383 "QM2 (XGbE23)",
1384 "IOB",
1385 "unknown",
1386 "unknown",
1387 "unknown",
1388 "unknown",
1389};
1390
1391static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
1392{
1393 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1394 u32 err_addr_lo;
1395 u32 err_addr_hi;
1396 u32 reg;
1397 u32 info;
1398
1399 /* GIC transaction error interrupt */
1400 reg = readl(ctx->dev_csr + XGICTRANSERRINTSTS);
1401 if (!reg)
1402 goto chk_iob_err;
1403 dev_err(edac_dev->dev, "XGIC transaction error\n");
1404 if (reg & RD_ACCESS_ERR_MASK)
1405 dev_err(edac_dev->dev, "XGIC read size error\n");
1406 if (reg & M_RD_ACCESS_ERR_MASK)
1407 dev_err(edac_dev->dev, "Multiple XGIC read size error\n");
1408 if (reg & WR_ACCESS_ERR_MASK)
1409 dev_err(edac_dev->dev, "XGIC write size error\n");
1410 if (reg & M_WR_ACCESS_ERR_MASK)
1411 dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
1412 info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
1413 dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
1414 info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
1415 info);
1416 writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
1417
1418chk_iob_err:
1419 /* IOB memory error */
1420 reg = readl(ctx->dev_csr + GLBL_ERR_STS);
1421 if (!reg)
1422 return;
1423 if (reg & SEC_ERR_MASK) {
1424 err_addr_lo = readl(ctx->dev_csr + GLBL_SEC_ERRL);
1425 err_addr_hi = readl(ctx->dev_csr + GLBL_SEC_ERRH);
1426 dev_err(edac_dev->dev,
1427 "IOB single-bit correctable memory error at 0x%08X.%08X\n",
1428 err_addr_lo, err_addr_hi);
1429 writel(err_addr_lo, ctx->dev_csr + GLBL_SEC_ERRL);
1430 writel(err_addr_hi, ctx->dev_csr + GLBL_SEC_ERRH);
1431 }
1432 if (reg & MSEC_ERR_MASK) {
1433 err_addr_lo = readl(ctx->dev_csr + GLBL_MSEC_ERRL);
1434 err_addr_hi = readl(ctx->dev_csr + GLBL_MSEC_ERRH);
1435 dev_err(edac_dev->dev,
1436 "IOB multiple single-bit correctable memory error at 0x%08X.%08X\n",
1437 err_addr_lo, err_addr_hi);
1438 writel(err_addr_lo, ctx->dev_csr + GLBL_MSEC_ERRL);
1439 writel(err_addr_hi, ctx->dev_csr + GLBL_MSEC_ERRH);
1440 }
1441 if (reg & (SEC_ERR_MASK | MSEC_ERR_MASK))
1442 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
1443
1444 if (reg & DED_ERR_MASK) {
1445 err_addr_lo = readl(ctx->dev_csr + GLBL_DED_ERRL);
1446 err_addr_hi = readl(ctx->dev_csr + GLBL_DED_ERRH);
1447 dev_err(edac_dev->dev,
1448 "IOB double-bit uncorrectable memory error at 0x%08X.%08X\n",
1449 err_addr_lo, err_addr_hi);
1450 writel(err_addr_lo, ctx->dev_csr + GLBL_DED_ERRL);
1451 writel(err_addr_hi, ctx->dev_csr + GLBL_DED_ERRH);
1452 }
1453 if (reg & MDED_ERR_MASK) {
1454 err_addr_lo = readl(ctx->dev_csr + GLBL_MDED_ERRL);
1455 err_addr_hi = readl(ctx->dev_csr + GLBL_MDED_ERRH);
1456 dev_err(edac_dev->dev,
1457 "Multiple IOB double-bit uncorrectable memory error at 0x%08X.%08X\n",
1458 err_addr_lo, err_addr_hi);
1459 writel(err_addr_lo, ctx->dev_csr + GLBL_MDED_ERRL);
1460 writel(err_addr_hi, ctx->dev_csr + GLBL_MDED_ERRH);
1461 }
1462 if (reg & (DED_ERR_MASK | MDED_ERR_MASK))
1463 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
1464}
1465
1466static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
1467{
1468 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1469 u32 err_addr_lo;
1470 u32 err_addr_hi;
1471 u32 reg;
1472
1473 /* IOB Bridge agent transaction error interrupt */
1474 reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
1475 if (!reg)
1476 return;
1477
1478 dev_err(edac_dev->dev, "IOB bridge agent (BA) transaction error\n");
1479 if (reg & WRERR_RESP_MASK)
1480 dev_err(edac_dev->dev, "IOB BA write response error\n");
1481 if (reg & M_WRERR_RESP_MASK)
1482 dev_err(edac_dev->dev,
1483 "Multiple IOB BA write response error\n");
1484 if (reg & XGIC_POISONED_REQ_MASK)
1485 dev_err(edac_dev->dev, "IOB BA XGIC poisoned write error\n");
1486 if (reg & M_XGIC_POISONED_REQ_MASK)
1487 dev_err(edac_dev->dev,
1488 "Multiple IOB BA XGIC poisoned write error\n");
1489 if (reg & RBM_POISONED_REQ_MASK)
1490 dev_err(edac_dev->dev, "IOB BA RBM poisoned write error\n");
1491 if (reg & M_RBM_POISONED_REQ_MASK)
1492 dev_err(edac_dev->dev,
1493 "Multiple IOB BA RBM poisoned write error\n");
1494 if (reg & WDATA_CORRUPT_MASK)
1495 dev_err(edac_dev->dev, "IOB BA write error\n");
1496 if (reg & M_WDATA_CORRUPT_MASK)
1497 dev_err(edac_dev->dev, "Multiple IOB BA write error\n");
1498 if (reg & TRANS_CORRUPT_MASK)
1499 dev_err(edac_dev->dev, "IOB BA transaction error\n");
1500 if (reg & M_TRANS_CORRUPT_MASK)
1501 dev_err(edac_dev->dev, "Multiple IOB BA transaction error\n");
1502 if (reg & RIDRAM_CORRUPT_MASK)
1503 dev_err(edac_dev->dev,
1504 "IOB BA RDIDRAM read transaction ID error\n");
1505 if (reg & M_RIDRAM_CORRUPT_MASK)
1506 dev_err(edac_dev->dev,
1507 "Multiple IOB BA RDIDRAM read transaction ID error\n");
1508 if (reg & WIDRAM_CORRUPT_MASK)
1509 dev_err(edac_dev->dev,
1510 "IOB BA RDIDRAM write transaction ID error\n");
1511 if (reg & M_WIDRAM_CORRUPT_MASK)
1512 dev_err(edac_dev->dev,
1513 "Multiple IOB BA RDIDRAM write transaction ID error\n");
1514 if (reg & ILLEGAL_ACCESS_MASK)
1515 dev_err(edac_dev->dev,
1516 "IOB BA XGIC/RB illegal access error\n");
1517 if (reg & M_ILLEGAL_ACCESS_MASK)
1518 dev_err(edac_dev->dev,
1519 "Multiple IOB BA XGIC/RB illegal access error\n");
1520
1521 err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
1522 err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
1523 dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
1524 REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
1525 ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
1526 if (reg & WRERR_RESP_MASK)
1527 dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
1528 readl(ctx->dev_csr + IOBBATRANSERRCSWREQID));
1529 writel(reg, ctx->dev_csr + IOBBATRANSERRINTSTS);
1530}
1531
1532static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
1533{
1534 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1535 u32 err_addr_lo;
1536 u32 err_addr_hi;
1537 u32 reg;
1538
1539 /* IOB Processing agent transaction error interrupt */
1540 reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
1541 if (!reg)
1542 goto chk_iob_axi0;
1543 dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
1544 if (reg & IOBPA_RDATA_CORRUPT_MASK)
1545 dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
1546 if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
1547 dev_err(edac_dev->dev,
1548 "Multiple IOB PA read data RAM error\n");
1549 if (reg & IOBPA_WDATA_CORRUPT_MASK)
1550 dev_err(edac_dev->dev, "IOB PA write data RAM error\n");
1551 if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
1552 dev_err(edac_dev->dev,
1553 "Multiple IOB PA write data RAM error\n");
1554 if (reg & IOBPA_TRANS_CORRUPT_MASK)
1555 dev_err(edac_dev->dev, "IOB PA transaction error\n");
1556 if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
1557 dev_err(edac_dev->dev, "Multiple IOB PA transaction error\n");
1558 if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
1559 dev_err(edac_dev->dev, "IOB PA transaction ID RAM error\n");
1560 if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)
1561 dev_err(edac_dev->dev,
1562 "Multiple IOB PA transaction ID RAM error\n");
1563 writel(reg, ctx->dev_csr + IOBPATRANSERRINTSTS);
1564
1565chk_iob_axi0:
1566 /* IOB AXI0 Error */
1567 reg = readl(ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
1568 if (!reg)
1569 goto chk_iob_axi1;
1570 err_addr_lo = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOL);
1571 err_addr_hi = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOH);
1572 dev_err(edac_dev->dev,
1573 "%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
1574 reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
1575 REQTYPE_RD(err_addr_hi) ? "read" : "write",
1576 ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
1577 writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
1578
1579chk_iob_axi1:
1580 /* IOB AXI1 Error */
1581 reg = readl(ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
1582 if (!reg)
1583 return;
1584 err_addr_lo = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOL);
1585 err_addr_hi = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOH);
1586 dev_err(edac_dev->dev,
1587 "%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
1588 reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
1589 REQTYPE_RD(err_addr_hi) ? "read" : "write",
1590 ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
1591 writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
1592}
1593
1594static void xgene_edac_soc_check(struct edac_device_ctl_info *edac_dev)
1595{
1596 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1597 const char * const *soc_mem_err = NULL;
1598 u32 pcp_hp_stat;
1599 u32 pcp_lp_stat;
1600 u32 reg;
1601 int i;
1602
1603 xgene_edac_pcp_rd(ctx->edac, PCPHPERRINTSTS, &pcp_hp_stat);
1604 xgene_edac_pcp_rd(ctx->edac, PCPLPERRINTSTS, &pcp_lp_stat);
1605 xgene_edac_pcp_rd(ctx->edac, MEMERRINTSTS, &reg);
1606 if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
1607 IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) ||
1608 (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg))
1609 return;
1610
1611 if (pcp_hp_stat & IOB_XGIC_ERR_MASK)
1612 xgene_edac_iob_gic_report(edac_dev);
1613
1614 if (pcp_hp_stat & (IOB_RB_ERR_MASK | IOB_BA_ERR_MASK))
1615 xgene_edac_rb_report(edac_dev);
1616
1617 if (pcp_hp_stat & IOB_PA_ERR_MASK)
1618 xgene_edac_pa_report(edac_dev);
1619
1620 if (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) {
1621 dev_info(edac_dev->dev,
1622 "CSW switch trace correctable memory parity error\n");
1623 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
1624 }
1625
1626 if (!reg)
1627 return;
1628 if (ctx->version == 1)
1629 soc_mem_err = soc_mem_err_v1;
1630 if (!soc_mem_err) {
1631 dev_err(edac_dev->dev, "SoC memory parity error 0x%08X\n",
1632 reg);
1633 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
1634 return;
1635 }
1636 for (i = 0; i < 31; i++) {
1637 if (reg & (1 << i)) {
1638 dev_err(edac_dev->dev, "%s memory parity error\n",
1639 soc_mem_err[i]);
1640 edac_device_handle_ue(edac_dev, 0, 0,
1641 edac_dev->ctl_name);
1642 }
1643 }
1644}
1645
1646static void xgene_edac_soc_hw_init(struct edac_device_ctl_info *edac_dev,
1647 bool enable)
1648{
1649 struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
1650
1651 /* Enable SoC IP error interrupt */
1652 if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
1653 if (enable) {
1654 xgene_edac_pcp_clrbits(ctx->edac, PCPHPERRINTMSK,
1655 IOB_PA_ERR_MASK |
1656 IOB_BA_ERR_MASK |
1657 IOB_XGIC_ERR_MASK |
1658 IOB_RB_ERR_MASK);
1659 xgene_edac_pcp_clrbits(ctx->edac, PCPLPERRINTMSK,
1660 CSW_SWITCH_TRACE_ERR_MASK);
1661 } else {
1662 xgene_edac_pcp_setbits(ctx->edac, PCPHPERRINTMSK,
1663 IOB_PA_ERR_MASK |
1664 IOB_BA_ERR_MASK |
1665 IOB_XGIC_ERR_MASK |
1666 IOB_RB_ERR_MASK);
1667 xgene_edac_pcp_setbits(ctx->edac, PCPLPERRINTMSK,
1668 CSW_SWITCH_TRACE_ERR_MASK);
1669 }
1670
1671 writel(enable ? 0x0 : 0xFFFFFFFF,
1672 ctx->dev_csr + IOBAXIS0TRANSERRINTMSK);
1673 writel(enable ? 0x0 : 0xFFFFFFFF,
1674 ctx->dev_csr + IOBAXIS1TRANSERRINTMSK);
1675 writel(enable ? 0x0 : 0xFFFFFFFF,
1676 ctx->dev_csr + XGICTRANSERRINTMSK);
1677
1678 xgene_edac_pcp_setbits(ctx->edac, MEMERRINTMSK,
1679 enable ? 0x0 : 0xFFFFFFFF);
1680 }
1681}
1682
1683static int xgene_edac_soc_add(struct xgene_edac *edac, struct device_node *np,
1684 int version)
1685{
1686 struct edac_device_ctl_info *edac_dev;
1687 struct xgene_edac_dev_ctx *ctx;
1688 void __iomem *dev_csr;
1689 struct resource res;
1690 int edac_idx;
1691 int rc;
1692
1693 if (!devres_open_group(edac->dev, xgene_edac_soc_add, GFP_KERNEL))
1694 return -ENOMEM;
1695
1696 rc = of_address_to_resource(np, 0, &res);
1697 if (rc < 0) {
1698 dev_err(edac->dev, "no SoC resource address\n");
1699 goto err_release_group;
1700 }
1701 dev_csr = devm_ioremap_resource(edac->dev, &res);
1702 if (IS_ERR(dev_csr)) {
1703 dev_err(edac->dev,
1704 "devm_ioremap_resource failed for soc resource address\n");
1705 rc = PTR_ERR(dev_csr);
1706 goto err_release_group;
1707 }
1708
1709 edac_idx = edac_device_alloc_index();
1710 edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
1711 "SOC", 1, "SOC", 1, 2, NULL, 0,
1712 edac_idx);
1713 if (!edac_dev) {
1714 rc = -ENOMEM;
1715 goto err_release_group;
1716 }
1717
1718 ctx = edac_dev->pvt_info;
1719 ctx->dev_csr = dev_csr;
1720 ctx->name = "xgene_soc_err";
1721 ctx->edac_idx = edac_idx;
1722 ctx->edac = edac;
1723 ctx->edac_dev = edac_dev;
1724 ctx->ddev = *edac->dev;
1725 ctx->version = version;
1726 edac_dev->dev = &ctx->ddev;
1727 edac_dev->ctl_name = ctx->name;
1728 edac_dev->dev_name = ctx->name;
1729 edac_dev->mod_name = EDAC_MOD_STR;
1730
1731 if (edac_op_state == EDAC_OPSTATE_POLL)
1732 edac_dev->edac_check = xgene_edac_soc_check;
1733
1734 rc = edac_device_add_device(edac_dev);
1735 if (rc > 0) {
1736 dev_err(edac->dev, "failed edac_device_add_device()\n");
1737 rc = -ENOMEM;
1738 goto err_ctl_free;
1739 }
1740
1741 if (edac_op_state == EDAC_OPSTATE_INT)
1742 edac_dev->op_state = OP_RUNNING_INTERRUPT;
1743
1744 list_add(&ctx->next, &edac->socs);
1745
1746 xgene_edac_soc_hw_init(edac_dev, 1);
1747
1748 devres_remove_group(edac->dev, xgene_edac_soc_add);
1749
1750 dev_info(edac->dev, "X-Gene EDAC SoC registered\n");
1751
1752 return 0;
1753
1754err_ctl_free:
1755 edac_device_free_ctl_info(edac_dev);
1756err_release_group:
1757 devres_release_group(edac->dev, xgene_edac_soc_add);
1758 return rc;
1759}
1760
1761static int xgene_edac_soc_remove(struct xgene_edac_dev_ctx *soc)
1762{
1763 struct edac_device_ctl_info *edac_dev = soc->edac_dev;
1764
1765 xgene_edac_soc_hw_init(edac_dev, 0);
1766 edac_device_del_device(soc->edac->dev);
1767 edac_device_free_ctl_info(edac_dev);
1768 return 0;
1769}
1770
1019static irqreturn_t xgene_edac_isr(int irq, void *dev_id) 1771static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
1020{ 1772{
1021 struct xgene_edac *ctx = dev_id; 1773 struct xgene_edac *ctx = dev_id;
1022 struct xgene_edac_pmd_ctx *pmd; 1774 struct xgene_edac_pmd_ctx *pmd;
1775 struct xgene_edac_dev_ctx *node;
1023 unsigned int pcp_hp_stat; 1776 unsigned int pcp_hp_stat;
1024 unsigned int pcp_lp_stat; 1777 unsigned int pcp_lp_stat;
1025 1778
@@ -1030,9 +1783,8 @@ static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
1030 (MCU_CORR_ERR_MASK & pcp_lp_stat)) { 1783 (MCU_CORR_ERR_MASK & pcp_lp_stat)) {
1031 struct xgene_edac_mc_ctx *mcu; 1784 struct xgene_edac_mc_ctx *mcu;
1032 1785
1033 list_for_each_entry(mcu, &ctx->mcus, next) { 1786 list_for_each_entry(mcu, &ctx->mcus, next)
1034 xgene_edac_mc_check(mcu->mci); 1787 xgene_edac_mc_check(mcu->mci);
1035 }
1036 } 1788 }
1037 1789
1038 list_for_each_entry(pmd, &ctx->pmds, next) { 1790 list_for_each_entry(pmd, &ctx->pmds, next) {
@@ -1040,6 +1792,12 @@ static irqreturn_t xgene_edac_isr(int irq, void *dev_id)
1040 xgene_edac_pmd_check(pmd->edac_dev); 1792 xgene_edac_pmd_check(pmd->edac_dev);
1041 } 1793 }
1042 1794
1795 list_for_each_entry(node, &ctx->l3s, next)
1796 xgene_edac_l3_check(node->edac_dev);
1797
1798 list_for_each_entry(node, &ctx->socs, next)
1799 xgene_edac_soc_check(node->edac_dev);
1800
1043 return IRQ_HANDLED; 1801 return IRQ_HANDLED;
1044} 1802}
1045 1803
@@ -1058,6 +1816,8 @@ static int xgene_edac_probe(struct platform_device *pdev)
1058 platform_set_drvdata(pdev, edac); 1816 platform_set_drvdata(pdev, edac);
1059 INIT_LIST_HEAD(&edac->mcus); 1817 INIT_LIST_HEAD(&edac->mcus);
1060 INIT_LIST_HEAD(&edac->pmds); 1818 INIT_LIST_HEAD(&edac->pmds);
1819 INIT_LIST_HEAD(&edac->l3s);
1820 INIT_LIST_HEAD(&edac->socs);
1061 spin_lock_init(&edac->lock); 1821 spin_lock_init(&edac->lock);
1062 mutex_init(&edac->mc_lock); 1822 mutex_init(&edac->mc_lock);
1063 1823
@@ -1122,6 +1882,8 @@ static int xgene_edac_probe(struct platform_device *pdev)
1122 } 1882 }
1123 } 1883 }
1124 1884
1885 edac->dfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
1886
1125 for_each_child_of_node(pdev->dev.of_node, child) { 1887 for_each_child_of_node(pdev->dev.of_node, child) {
1126 if (!of_device_is_available(child)) 1888 if (!of_device_is_available(child))
1127 continue; 1889 continue;
@@ -1131,6 +1893,14 @@ static int xgene_edac_probe(struct platform_device *pdev)
1131 xgene_edac_pmd_add(edac, child, 1); 1893 xgene_edac_pmd_add(edac, child, 1);
1132 if (of_device_is_compatible(child, "apm,xgene-edac-pmd-v2")) 1894 if (of_device_is_compatible(child, "apm,xgene-edac-pmd-v2"))
1133 xgene_edac_pmd_add(edac, child, 2); 1895 xgene_edac_pmd_add(edac, child, 2);
1896 if (of_device_is_compatible(child, "apm,xgene-edac-l3"))
1897 xgene_edac_l3_add(edac, child, 1);
1898 if (of_device_is_compatible(child, "apm,xgene-edac-l3-v2"))
1899 xgene_edac_l3_add(edac, child, 2);
1900 if (of_device_is_compatible(child, "apm,xgene-edac-soc"))
1901 xgene_edac_soc_add(edac, child, 0);
1902 if (of_device_is_compatible(child, "apm,xgene-edac-soc-v1"))
1903 xgene_edac_soc_add(edac, child, 1);
1134 } 1904 }
1135 1905
1136 return 0; 1906 return 0;
@@ -1146,14 +1916,21 @@ static int xgene_edac_remove(struct platform_device *pdev)
1146 struct xgene_edac_mc_ctx *temp_mcu; 1916 struct xgene_edac_mc_ctx *temp_mcu;
1147 struct xgene_edac_pmd_ctx *pmd; 1917 struct xgene_edac_pmd_ctx *pmd;
1148 struct xgene_edac_pmd_ctx *temp_pmd; 1918 struct xgene_edac_pmd_ctx *temp_pmd;
1919 struct xgene_edac_dev_ctx *node;
1920 struct xgene_edac_dev_ctx *temp_node;
1149 1921
1150 list_for_each_entry_safe(mcu, temp_mcu, &edac->mcus, next) { 1922 list_for_each_entry_safe(mcu, temp_mcu, &edac->mcus, next)
1151 xgene_edac_mc_remove(mcu); 1923 xgene_edac_mc_remove(mcu);
1152 }
1153 1924
1154 list_for_each_entry_safe(pmd, temp_pmd, &edac->pmds, next) { 1925 list_for_each_entry_safe(pmd, temp_pmd, &edac->pmds, next)
1155 xgene_edac_pmd_remove(pmd); 1926 xgene_edac_pmd_remove(pmd);
1156 } 1927
1928 list_for_each_entry_safe(node, temp_node, &edac->l3s, next)
1929 xgene_edac_l3_remove(node);
1930
1931 list_for_each_entry_safe(node, temp_node, &edac->socs, next)
1932 xgene_edac_soc_remove(node);
1933
1157 return 0; 1934 return 0;
1158} 1935}
1159 1936
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 84533e02fbf8..e1670d533f97 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -52,6 +52,28 @@ config EFI_RUNTIME_MAP
52 52
53 See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. 53 See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
54 54
55config EFI_FAKE_MEMMAP
56 bool "Enable EFI fake memory map"
57 depends on EFI && X86
58 default n
59 help
 60 Saying Y here will enable the "efi_fake_mem" boot option.
 61 By specifying this parameter, you can add an arbitrary attribute
 62 to a specific memory range by updating the original (firmware
 63 provided) EFI memmap.
 64 This is useful for debugging EFI memmap related features,
 65 e.g. the Address Range Mirroring feature.
66
67config EFI_MAX_FAKE_MEM
68 int "maximum allowable number of ranges in efi_fake_mem boot option"
69 depends on EFI_FAKE_MEMMAP
70 range 1 128
71 default 8
72 help
 73 Maximum allowable number of ranges in the efi_fake_mem boot option.
 74 Ranges can be set up to this value using a comma-separated list.
75 The default value is 8.
76
55config EFI_PARAMS_FROM_FDT 77config EFI_PARAMS_FROM_FDT
56 bool 78 bool
57 help 79 help
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 6fd3da938717..c24f00569acb 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_UEFI_CPER) += cper.o
9obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o 9obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
10obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o 10obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
11obj-$(CONFIG_EFI_STUB) += libstub/ 11obj-$(CONFIG_EFI_STUB) += libstub/
12obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index e992abc5ef26..c8d794c58479 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -400,3 +400,4 @@ module_exit(efivars_pstore_exit);
400 400
401MODULE_DESCRIPTION("EFI variable backend for pstore"); 401MODULE_DESCRIPTION("EFI variable backend for pstore");
402MODULE_LICENSE("GPL"); 402MODULE_LICENSE("GPL");
403MODULE_ALIAS("platform:efivars");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index d6144e3b97c5..027ca212179f 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -26,20 +26,21 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27 27
28struct efi __read_mostly efi = { 28struct efi __read_mostly efi = {
29 .mps = EFI_INVALID_TABLE_ADDR, 29 .mps = EFI_INVALID_TABLE_ADDR,
30 .acpi = EFI_INVALID_TABLE_ADDR, 30 .acpi = EFI_INVALID_TABLE_ADDR,
31 .acpi20 = EFI_INVALID_TABLE_ADDR, 31 .acpi20 = EFI_INVALID_TABLE_ADDR,
32 .smbios = EFI_INVALID_TABLE_ADDR, 32 .smbios = EFI_INVALID_TABLE_ADDR,
33 .smbios3 = EFI_INVALID_TABLE_ADDR, 33 .smbios3 = EFI_INVALID_TABLE_ADDR,
34 .sal_systab = EFI_INVALID_TABLE_ADDR, 34 .sal_systab = EFI_INVALID_TABLE_ADDR,
35 .boot_info = EFI_INVALID_TABLE_ADDR, 35 .boot_info = EFI_INVALID_TABLE_ADDR,
36 .hcdp = EFI_INVALID_TABLE_ADDR, 36 .hcdp = EFI_INVALID_TABLE_ADDR,
37 .uga = EFI_INVALID_TABLE_ADDR, 37 .uga = EFI_INVALID_TABLE_ADDR,
38 .uv_systab = EFI_INVALID_TABLE_ADDR, 38 .uv_systab = EFI_INVALID_TABLE_ADDR,
39 .fw_vendor = EFI_INVALID_TABLE_ADDR, 39 .fw_vendor = EFI_INVALID_TABLE_ADDR,
40 .runtime = EFI_INVALID_TABLE_ADDR, 40 .runtime = EFI_INVALID_TABLE_ADDR,
41 .config_table = EFI_INVALID_TABLE_ADDR, 41 .config_table = EFI_INVALID_TABLE_ADDR,
42 .esrt = EFI_INVALID_TABLE_ADDR, 42 .esrt = EFI_INVALID_TABLE_ADDR,
43 .properties_table = EFI_INVALID_TABLE_ADDR,
43}; 44};
44EXPORT_SYMBOL(efi); 45EXPORT_SYMBOL(efi);
45 46
@@ -63,6 +64,9 @@ static int __init parse_efi_cmdline(char *str)
63 return -EINVAL; 64 return -EINVAL;
64 } 65 }
65 66
67 if (parse_option_str(str, "debug"))
68 set_bit(EFI_DBG, &efi.flags);
69
66 if (parse_option_str(str, "noruntime")) 70 if (parse_option_str(str, "noruntime"))
67 disable_runtime = true; 71 disable_runtime = true;
68 72
@@ -250,7 +254,7 @@ subsys_initcall(efisubsys_init);
250int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) 254int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
251{ 255{
252 struct efi_memory_map *map = efi.memmap; 256 struct efi_memory_map *map = efi.memmap;
253 void *p, *e; 257 phys_addr_t p, e;
254 258
255 if (!efi_enabled(EFI_MEMMAP)) { 259 if (!efi_enabled(EFI_MEMMAP)) {
256 pr_err_once("EFI_MEMMAP is not enabled.\n"); 260 pr_err_once("EFI_MEMMAP is not enabled.\n");
@@ -282,10 +286,10 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
282 * So just always get our own virtual map on the CPU. 286 * So just always get our own virtual map on the CPU.
283 * 287 *
284 */ 288 */
285 md = early_memremap((phys_addr_t)p, sizeof (*md)); 289 md = early_memremap(p, sizeof (*md));
286 if (!md) { 290 if (!md) {
287 pr_err_once("early_memremap(%p, %zu) failed.\n", 291 pr_err_once("early_memremap(%pa, %zu) failed.\n",
288 p, sizeof (*md)); 292 &p, sizeof (*md));
289 return -ENOMEM; 293 return -ENOMEM;
290 } 294 }
291 295
@@ -362,6 +366,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
362 {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3}, 366 {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
363 {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, 367 {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
364 {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt}, 368 {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
369 {EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
365 {NULL_GUID, NULL, NULL}, 370 {NULL_GUID, NULL, NULL},
366}; 371};
367 372
@@ -421,6 +426,24 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
421 } 426 }
422 pr_cont("\n"); 427 pr_cont("\n");
423 set_bit(EFI_CONFIG_TABLES, &efi.flags); 428 set_bit(EFI_CONFIG_TABLES, &efi.flags);
429
430 /* Parse the EFI Properties table if it exists */
431 if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
432 efi_properties_table_t *tbl;
433
434 tbl = early_memremap(efi.properties_table, sizeof(*tbl));
435 if (tbl == NULL) {
436 pr_err("Could not map Properties table!\n");
437 return -ENOMEM;
438 }
439
440 if (tbl->memory_protection_attribute &
441 EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
442 set_bit(EFI_NX_PE_DATA, &efi.flags);
443
444 early_memunmap(tbl, sizeof(*tbl));
445 }
446
424 return 0; 447 return 0;
425} 448}
426 449
@@ -489,7 +512,6 @@ static __initdata struct {
489}; 512};
490 513
491struct param_info { 514struct param_info {
492 int verbose;
493 int found; 515 int found;
494 void *params; 516 void *params;
495}; 517};
@@ -520,21 +542,20 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
520 else 542 else
521 *(u64 *)dest = val; 543 *(u64 *)dest = val;
522 544
523 if (info->verbose) 545 if (efi_enabled(EFI_DBG))
524 pr_info(" %s: 0x%0*llx\n", dt_params[i].name, 546 pr_info(" %s: 0x%0*llx\n", dt_params[i].name,
525 dt_params[i].size * 2, val); 547 dt_params[i].size * 2, val);
526 } 548 }
527 return 1; 549 return 1;
528} 550}
529 551
530int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) 552int __init efi_get_fdt_params(struct efi_fdt_params *params)
531{ 553{
532 struct param_info info; 554 struct param_info info;
533 int ret; 555 int ret;
534 556
535 pr_info("Getting EFI parameters from FDT:\n"); 557 pr_info("Getting EFI parameters from FDT:\n");
536 558
537 info.verbose = verbose;
538 info.found = 0; 559 info.found = 0;
539 info.params = params; 560 info.params = params;
540 561
@@ -588,16 +609,19 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
588 609
589 attr = md->attribute; 610 attr = md->attribute;
590 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT | 611 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
591 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_WP | 612 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
592 EFI_MEMORY_RP | EFI_MEMORY_XP | EFI_MEMORY_RUNTIME)) 613 EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
614 EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
593 snprintf(pos, size, "|attr=0x%016llx]", 615 snprintf(pos, size, "|attr=0x%016llx]",
594 (unsigned long long)attr); 616 (unsigned long long)attr);
595 else 617 else
596 snprintf(pos, size, "|%3s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", 618 snprintf(pos, size, "|%3s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
597 attr & EFI_MEMORY_RUNTIME ? "RUN" : "", 619 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
620 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
598 attr & EFI_MEMORY_XP ? "XP" : "", 621 attr & EFI_MEMORY_XP ? "XP" : "",
599 attr & EFI_MEMORY_RP ? "RP" : "", 622 attr & EFI_MEMORY_RP ? "RP" : "",
600 attr & EFI_MEMORY_WP ? "WP" : "", 623 attr & EFI_MEMORY_WP ? "WP" : "",
624 attr & EFI_MEMORY_RO ? "RO" : "",
601 attr & EFI_MEMORY_UCE ? "UCE" : "", 625 attr & EFI_MEMORY_UCE ? "UCE" : "",
602 attr & EFI_MEMORY_WB ? "WB" : "", 626 attr & EFI_MEMORY_WB ? "WB" : "",
603 attr & EFI_MEMORY_WT ? "WT" : "", 627 attr & EFI_MEMORY_WT ? "WT" : "",
@@ -605,3 +629,36 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
605 attr & EFI_MEMORY_UC ? "UC" : ""); 629 attr & EFI_MEMORY_UC ? "UC" : "");
606 return buf; 630 return buf;
607} 631}
632
633/*
634 * efi_mem_attributes - lookup memmap attributes for physical address
635 * @phys_addr: the physical address to lookup
636 *
637 * Search in the EFI memory map for the region covering
638 * @phys_addr. Returns the EFI memory attributes if the region
639 * was found in the memory map, 0 otherwise.
640 *
641 * Despite being marked __weak, most architectures should *not*
642 * override this function. It is __weak solely for the benefit
643 * of ia64 which has a funky EFI memory map that doesn't work
644 * the same way as other architectures.
645 */
646u64 __weak efi_mem_attributes(unsigned long phys_addr)
647{
648 struct efi_memory_map *map;
649 efi_memory_desc_t *md;
650 void *p;
651
652 if (!efi_enabled(EFI_MEMMAP))
653 return 0;
654
655 map = efi.memmap;
656 for (p = map->map; p < map->map_end; p += map->desc_size) {
657 md = p;
658 if ((md->phys_addr <= phys_addr) &&
659 (phys_addr < (md->phys_addr +
660 (md->num_pages << EFI_PAGE_SHIFT))))
661 return md->attribute;
662 }
663 return 0;
664}
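As an illustration of how a caller might use this lookup, here is a minimal sketch; the helper below and its choice of ioremap variants are assumptions for illustration, not code from this patch:

	#include <linux/efi.h>
	#include <linux/io.h>

	/* Use a cacheable mapping only when the EFI memory map marks the
	 * region write-back; fall back to a plain device mapping otherwise. */
	static void __iomem *example_map_fw_region(phys_addr_t phys, size_t size)
	{
		u64 attr = efi_mem_attributes(phys);	/* 0 when not in the memmap */

		if (attr & EFI_MEMORY_WB)
			return ioremap_cache(phys, size);

		return ioremap(phys, size);
	}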
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index a5b95d61ae71..22c5285f7705 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -20,7 +20,6 @@
20#include <linux/kobject.h> 20#include <linux/kobject.h>
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/memblock.h> 22#include <linux/memblock.h>
23#include <linux/module.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/types.h> 24#include <linux/types.h>
26 25
@@ -450,22 +449,10 @@ err:
450 esrt = NULL; 449 esrt = NULL;
451 return error; 450 return error;
452} 451}
452device_initcall(esrt_sysfs_init);
453 453
454static void __exit esrt_sysfs_exit(void) 454/*
455{
456 pr_debug("esrt-sysfs: unloading.\n");
457 cleanup_entry_list();
458 kset_unregister(esrt_kset);
459 sysfs_remove_group(esrt_kobj, &esrt_attr_group);
460 kfree(esrt);
461 esrt = NULL;
462 kobject_del(esrt_kobj);
463 kobject_put(esrt_kobj);
464}
465
466module_init(esrt_sysfs_init);
467module_exit(esrt_sysfs_exit);
468
469MODULE_AUTHOR("Peter Jones <pjones@redhat.com>"); 455MODULE_AUTHOR("Peter Jones <pjones@redhat.com>");
470MODULE_DESCRIPTION("EFI System Resource Table support"); 456MODULE_DESCRIPTION("EFI System Resource Table support");
471MODULE_LICENSE("GPL"); 457MODULE_LICENSE("GPL");
458*/
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
new file mode 100644
index 000000000000..ed3a854950cc
--- /dev/null
+++ b/drivers/firmware/efi/fake_mem.c
@@ -0,0 +1,238 @@
1/*
2 * fake_mem.c
3 *
4 * Copyright (C) 2015 FUJITSU LIMITED
5 * Author: Taku Izumi <izumi.taku@jp.fujitsu.com>
6 *
 7 * This code introduces a new boot option named "efi_fake_mem".
 8 * By specifying this parameter, you can add an arbitrary attribute to a
 9 * specific memory range by updating the original (firmware provided) EFI
 10 * memmap.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License,
14 * version 2, as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, see <http://www.gnu.org/licenses/>.
23 *
24 * The full GNU General Public License is included in this distribution in
25 * the file called "COPYING".
26 */
27
28#include <linux/kernel.h>
29#include <linux/efi.h>
30#include <linux/init.h>
31#include <linux/memblock.h>
32#include <linux/types.h>
33#include <linux/sort.h>
34#include <asm/efi.h>
35
36#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
37
38struct fake_mem {
39 struct range range;
40 u64 attribute;
41};
42static struct fake_mem fake_mems[EFI_MAX_FAKEMEM];
43static int nr_fake_mem;
44
45static int __init cmp_fake_mem(const void *x1, const void *x2)
46{
47 const struct fake_mem *m1 = x1;
48 const struct fake_mem *m2 = x2;
49
50 if (m1->range.start < m2->range.start)
51 return -1;
52 if (m1->range.start > m2->range.start)
53 return 1;
54 return 0;
55}
56
57void __init efi_fake_memmap(void)
58{
59 u64 start, end, m_start, m_end, m_attr;
60 int new_nr_map = memmap.nr_map;
61 efi_memory_desc_t *md;
62 phys_addr_t new_memmap_phy;
63 void *new_memmap;
64 void *old, *new;
65 int i;
66
67 if (!nr_fake_mem || !efi_enabled(EFI_MEMMAP))
68 return;
69
 70 /* count up the number of EFI memory descriptors */
71 for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
72 md = old;
73 start = md->phys_addr;
74 end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
75
76 for (i = 0; i < nr_fake_mem; i++) {
77 /* modifying range */
78 m_start = fake_mems[i].range.start;
79 m_end = fake_mems[i].range.end;
80
81 if (m_start <= start) {
82 /* split into 2 parts */
83 if (start < m_end && m_end < end)
84 new_nr_map++;
85 }
86 if (start < m_start && m_start < end) {
87 /* split into 3 parts */
88 if (m_end < end)
89 new_nr_map += 2;
90 /* split into 2 parts */
91 if (end <= m_end)
92 new_nr_map++;
93 }
94 }
95 }
96
97 /* allocate memory for new EFI memmap */
98 new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
99 PAGE_SIZE);
100 if (!new_memmap_phy)
101 return;
102
103 /* create new EFI memmap */
104 new_memmap = early_memremap(new_memmap_phy,
105 memmap.desc_size * new_nr_map);
106 if (!new_memmap) {
107 memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
108 return;
109 }
110
111 for (old = memmap.map, new = new_memmap;
112 old < memmap.map_end;
113 old += memmap.desc_size, new += memmap.desc_size) {
114
115 /* copy original EFI memory descriptor */
116 memcpy(new, old, memmap.desc_size);
117 md = new;
118 start = md->phys_addr;
119 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
120
121 for (i = 0; i < nr_fake_mem; i++) {
122 /* modifying range */
123 m_start = fake_mems[i].range.start;
124 m_end = fake_mems[i].range.end;
125 m_attr = fake_mems[i].attribute;
126
127 if (m_start <= start && end <= m_end)
128 md->attribute |= m_attr;
129
130 if (m_start <= start &&
131 (start < m_end && m_end < end)) {
132 /* first part */
133 md->attribute |= m_attr;
134 md->num_pages = (m_end - md->phys_addr + 1) >>
135 EFI_PAGE_SHIFT;
136 /* latter part */
137 new += memmap.desc_size;
138 memcpy(new, old, memmap.desc_size);
139 md = new;
140 md->phys_addr = m_end + 1;
141 md->num_pages = (end - md->phys_addr + 1) >>
142 EFI_PAGE_SHIFT;
143 }
144
145 if ((start < m_start && m_start < end) && m_end < end) {
146 /* first part */
147 md->num_pages = (m_start - md->phys_addr) >>
148 EFI_PAGE_SHIFT;
149 /* middle part */
150 new += memmap.desc_size;
151 memcpy(new, old, memmap.desc_size);
152 md = new;
153 md->attribute |= m_attr;
154 md->phys_addr = m_start;
155 md->num_pages = (m_end - m_start + 1) >>
156 EFI_PAGE_SHIFT;
157 /* last part */
158 new += memmap.desc_size;
159 memcpy(new, old, memmap.desc_size);
160 md = new;
161 md->phys_addr = m_end + 1;
162 md->num_pages = (end - m_end) >>
163 EFI_PAGE_SHIFT;
164 }
165
166 if ((start < m_start && m_start < end) &&
167 (end <= m_end)) {
168 /* first part */
169 md->num_pages = (m_start - md->phys_addr) >>
170 EFI_PAGE_SHIFT;
171 /* latter part */
172 new += memmap.desc_size;
173 memcpy(new, old, memmap.desc_size);
174 md = new;
175 md->phys_addr = m_start;
176 md->num_pages = (end - md->phys_addr + 1) >>
177 EFI_PAGE_SHIFT;
178 md->attribute |= m_attr;
179 }
180 }
181 }
182
183 /* swap into new EFI memmap */
184 efi_unmap_memmap();
185 memmap.map = new_memmap;
186 memmap.phys_map = new_memmap_phy;
187 memmap.nr_map = new_nr_map;
188 memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
189 set_bit(EFI_MEMMAP, &efi.flags);
190
191 /* print new EFI memmap */
192 efi_print_memmap();
193}
194
195static int __init setup_fake_mem(char *p)
196{
197 u64 start = 0, mem_size = 0, attribute = 0;
198 int i;
199
200 if (!p)
201 return -EINVAL;
202
203 while (*p != '\0') {
204 mem_size = memparse(p, &p);
205 if (*p == '@')
206 start = memparse(p+1, &p);
207 else
208 break;
209
210 if (*p == ':')
211 attribute = simple_strtoull(p+1, &p, 0);
212 else
213 break;
214
215 if (nr_fake_mem >= EFI_MAX_FAKEMEM)
216 break;
217
218 fake_mems[nr_fake_mem].range.start = start;
219 fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
220 fake_mems[nr_fake_mem].attribute = attribute;
221 nr_fake_mem++;
222
223 if (*p == ',')
224 p++;
225 }
226
227 sort(fake_mems, nr_fake_mem, sizeof(struct fake_mem),
228 cmp_fake_mem, NULL);
229
230 for (i = 0; i < nr_fake_mem; i++)
231 pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
232 fake_mems[i].attribute, fake_mems[i].range.start,
233 fake_mems[i].range.end);
234
235 return *p == '\0' ? 0 : -EINVAL;
236}
237
238early_param("efi_fake_mem", setup_fake_mem);
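Given the parser above, the boot option takes ranges of the form <size>@<start>:<attribute>, separated by commas, with the usual K/M/G suffixes accepted by memparse(). An illustrative command line (an example, not taken from this patch):

	efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000

This would tag two 2 GiB ranges with attribute 0x10000 (EFI_MEMORY_MORE_RELIABLE), matching the Address Range Mirroring use case mentioned in the Kconfig help.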
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8949b3f6f74d..b18bea08ff25 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -119,6 +119,13 @@ config GPIO_ALTERA
119 119
120 If driver is built as a module it will be called gpio-altera. 120 If driver is built as a module it will be called gpio-altera.
121 121
122config GPIO_AMDPT
123 tristate "AMD Promontory GPIO support"
124 depends on ACPI
125 help
126 Driver for GPIO functionality on the AMD Promontory IOHub.
127 Requires ACPI ASL code to enumerate it as a platform device.
128
122config GPIO_BCM_KONA 129config GPIO_BCM_KONA
123 bool "Broadcom Kona GPIO" 130 bool "Broadcom Kona GPIO"
124 depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST) 131 depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -176,16 +183,6 @@ config GPIO_ETRAXFS
176 help 183 help
177 Say yes here to support the GPIO controller on Axis ETRAX FS SoCs. 184 Say yes here to support the GPIO controller on Axis ETRAX FS SoCs.
178 185
179config GPIO_F7188X
180 tristate "F71869, F71869A, F71882FG and F71889F GPIO support"
181 depends on X86
182 help
183 This option enables support for GPIOs found on Fintek Super-I/O
184 chips F71869, F71869A, F71882FG and F71889F.
185
186 To compile this driver as a module, choose M here: the module will
187 be called f7188x-gpio.
188
189config GPIO_GE_FPGA 186config GPIO_GE_FPGA
190 bool "GE FPGA based GPIO" 187 bool "GE FPGA based GPIO"
191 depends on GE_FPGA 188 depends on GE_FPGA
@@ -235,12 +232,6 @@ config GPIO_IOP
235 232
236 If unsure, say N. 233 If unsure, say N.
237 234
238config GPIO_IT8761E
239 tristate "IT8761E GPIO support"
240 depends on X86 # unconditional access to IO space.
241 help
242 Say yes here to support GPIO functionality of IT8761E super I/O chip.
243
244config GPIO_LOONGSON 235config GPIO_LOONGSON
245 bool "Loongson-2/3 GPIO support" 236 bool "Loongson-2/3 GPIO support"
246 depends on CPU_LOONGSON2 || CPU_LOONGSON3 237 depends on CPU_LOONGSON2 || CPU_LOONGSON3
@@ -297,14 +288,6 @@ config GPIO_MPC8XXX
297 Say Y here if you're going to use hardware that connects to the 288 Say Y here if you're going to use hardware that connects to the
298 MPC512x/831x/834x/837x/8572/8610 GPIOs. 289 MPC512x/831x/834x/837x/8572/8610 GPIOs.
299 290
300config GPIO_MSM_V2
301 tristate "Qualcomm MSM GPIO v2"
302 depends on GPIOLIB && OF && ARCH_QCOM
303 help
304 Say yes here to support the GPIO interface on ARM v7 based
305 Qualcomm MSM chips. Most of the pins on the MSM can be
306 selected for GPIO, and are controlled by this driver.
307
308config GPIO_MVEBU 291config GPIO_MVEBU
309 def_bool y 292 def_bool y
310 depends on PLAT_ORION 293 depends on PLAT_ORION
@@ -368,42 +351,6 @@ config GPIO_SAMSUNG
368 Legacy GPIO support. Use only for platforms without support for 351 Legacy GPIO support. Use only for platforms without support for
369 pinctrl. 352 pinctrl.
370 353
371config GPIO_SCH
372 tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
373 depends on PCI && X86
374 select MFD_CORE
375 select LPC_SCH
376 help
377 Say yes here to support GPIO interface on Intel Poulsbo SCH,
378 Intel Tunnel Creek processor, Intel Centerton processor or
379 Intel Quark X1000 SoC.
380
381 The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
382 powered by the core power rail and are turned off during sleep
383 modes (S3 and higher). The remaining four GPIOs are powered by
384 the Intel SCH suspend power supply. These GPIOs remain
385 active during S3. The suspend powered GPIOs can be used to wake the
386 system from the Suspend-to-RAM state.
387
388 The Intel Tunnel Creek processor has 5 GPIOs powered by the
389 core power rail and 9 from suspend power supply.
390
391 The Intel Centerton processor has a total of 30 GPIO pins.
392 Twenty-one are powered by the core power rail and 9 from the
393 suspend power supply.
394
395 The Intel Quark X1000 SoC has 2 GPIOs powered by the core
396 power well and 6 from the suspend power well.
397
398config GPIO_SCH311X
399 tristate "SMSC SCH311x SuperI/O GPIO"
400 help
401 Driver to enable the GPIOs found on SMSC SMSC SCH3112, SCH3114 and
402 SCH3116 "Super I/O" chipsets.
403
404 To compile this driver as a module, choose M here: the module will
405 be called gpio-sch311x.
406
407config GPIO_SPEAR_SPICS 354config GPIO_SPEAR_SPICS
408 bool "ST SPEAr13xx SPI Chip Select as GPIO support" 355 bool "ST SPEAr13xx SPI Chip Select as GPIO support"
409 depends on PLAT_SPEAR 356 depends on PLAT_SPEAR
@@ -440,15 +387,6 @@ config GPIO_TB10X
440 select GENERIC_IRQ_CHIP 387 select GENERIC_IRQ_CHIP
441 select OF_GPIO 388 select OF_GPIO
442 389
443config GPIO_TS5500
444 tristate "TS-5500 DIO blocks and compatibles"
445 depends on TS5500 || COMPILE_TEST
446 help
447 This driver supports Digital I/O exposed by pin blocks found on some
448 Technologic Systems platforms. It includes, but is not limited to, 3
449 blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
450 LCD port.
451
452config GPIO_TZ1090 390config GPIO_TZ1090
453 bool "Toumaz Xenif TZ1090 GPIO support" 391 bool "Toumaz Xenif TZ1090 GPIO support"
454 depends on SOC_TZ1090 392 depends on SOC_TZ1090
@@ -508,13 +446,13 @@ config GPIO_XGENE_SB
508 446
509config GPIO_XILINX 447config GPIO_XILINX
510 tristate "Xilinx GPIO support" 448 tristate "Xilinx GPIO support"
511 depends on OF_GPIO && (PPC || MICROBLAZE || ARCH_ZYNQ || X86) 449 depends on OF_GPIO
512 help 450 help
513 Say yes here to support the Xilinx FPGA GPIO device 451 Say yes here to support the Xilinx FPGA GPIO device
514 452
515config GPIO_XLP 453config GPIO_XLP
516 tristate "Netlogic XLP GPIO support" 454 tristate "Netlogic XLP GPIO support"
517 depends on CPU_XLP 455 depends on CPU_XLP && OF_GPIO
518 select GPIOLIB_IRQCHIP 456 select GPIOLIB_IRQCHIP
519 help 457 help
520 This driver provides support for GPIO interface on Netlogic XLP MIPS64 458 This driver provides support for GPIO interface on Netlogic XLP MIPS64
@@ -545,6 +483,87 @@ config GPIO_ZYNQ
545 help 483 help
546 Say yes here to support Xilinx Zynq GPIO controller. 484 Say yes here to support Xilinx Zynq GPIO controller.
547 485
486config GPIO_ZX
487 bool "ZTE ZX GPIO support"
488 select GPIOLIB_IRQCHIP
489 help
490 Say yes here to support the GPIO device on ZTE ZX SoCs.
491
492endmenu
493
494menu "Port-mapped I/O GPIO drivers"
495 depends on X86 # Unconditional I/O space access
496
497config GPIO_104_IDIO_16
498 tristate "ACCES 104-IDIO-16 GPIO support"
499 help
500 Enables GPIO support for the ACCES 104-IDIO-16 family.
501
502config GPIO_F7188X
503 tristate "F71869, F71869A, F71882FG and F71889F GPIO support"
504 help
505 This option enables support for GPIOs found on Fintek Super-I/O
506 chips F71869, F71869A, F71882FG and F71889F.
507
508 To compile this driver as a module, choose M here: the module will
509 be called f7188x-gpio.
510
511config GPIO_IT87
512 tristate "IT87xx GPIO support"
513 help
514 Say yes here to support GPIO functionality of IT87xx Super I/O chips.
515
516 This driver is tested with ITE IT8728 and IT8732 Super I/O chips, and
517 supports the IT8761E Super I/O chip as well.
518
519 To compile this driver as a module, choose M here: the module will
520 be called gpio_it87
521
522config GPIO_SCH
523 tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO"
524 depends on PCI
525 select MFD_CORE
526 select LPC_SCH
527 help
528 Say yes here to support GPIO interface on Intel Poulsbo SCH,
529 Intel Tunnel Creek processor, Intel Centerton processor or
530 Intel Quark X1000 SoC.
531
532 The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
533 powered by the core power rail and are turned off during sleep
534 modes (S3 and higher). The remaining four GPIOs are powered by
535 the Intel SCH suspend power supply. These GPIOs remain
536 active during S3. The suspend powered GPIOs can be used to wake the
537 system from the Suspend-to-RAM state.
538
539 The Intel Tunnel Creek processor has 5 GPIOs powered by the
540 core power rail and 9 from suspend power supply.
541
542 The Intel Centerton processor has a total of 30 GPIO pins.
543 Twenty-one are powered by the core power rail and 9 from the
544 suspend power supply.
545
546 The Intel Quark X1000 SoC has 2 GPIOs powered by the core
547 power well and 6 from the suspend power well.
548
549config GPIO_SCH311X
550 tristate "SMSC SCH311x SuperI/O GPIO"
551 help
552 Driver to enable the GPIOs found on SMSC SMSC SCH3112, SCH3114 and
553 SCH3116 "Super I/O" chipsets.
554
555 To compile this driver as a module, choose M here: the module will
556 be called gpio-sch311x.
557
558config GPIO_TS5500
559 tristate "TS-5500 DIO blocks and compatibles"
560 depends on TS5500 || COMPILE_TEST
561 help
562 This driver supports Digital I/O exposed by pin blocks found on some
563 Technologic Systems platforms. It includes, but is not limited to, 3
564 blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
565 LCD port.
566
548endmenu 567endmenu
549 568
550menu "I2C GPIO expanders" 569menu "I2C GPIO expanders"
@@ -552,7 +571,6 @@ menu "I2C GPIO expanders"
552 571
553config GPIO_ADP5588 572config GPIO_ADP5588
554 tristate "ADP5588 I2C GPIO expander" 573 tristate "ADP5588 I2C GPIO expander"
555 depends on I2C
556 help 574 help
557 This option enables support for 18 GPIOs found 575 This option enables support for 18 GPIOs found
558 on Analog Devices ADP5588 GPIO Expanders. 576 on Analog Devices ADP5588 GPIO Expanders.
@@ -566,7 +584,7 @@ config GPIO_ADP5588_IRQ
566 584
567config GPIO_ADNP 585config GPIO_ADNP
568 tristate "Avionic Design N-bit GPIO expander" 586 tristate "Avionic Design N-bit GPIO expander"
569 depends on I2C && OF_GPIO 587 depends on OF_GPIO
570 select GPIOLIB_IRQCHIP 588 select GPIOLIB_IRQCHIP
571 help 589 help
572 This option enables support for N GPIOs found on Avionic Design 590 This option enables support for N GPIOs found on Avionic Design
@@ -578,14 +596,12 @@ config GPIO_ADNP
578 596
579config GPIO_MAX7300 597config GPIO_MAX7300
580 tristate "Maxim MAX7300 GPIO expander" 598 tristate "Maxim MAX7300 GPIO expander"
581 depends on I2C
582 select GPIO_MAX730X 599 select GPIO_MAX730X
583 help 600 help
584 GPIO driver for Maxim MAX7300 I2C-based GPIO expander. 601 GPIO driver for Maxim MAX7300 I2C-based GPIO expander.
585 602
586config GPIO_MAX732X 603config GPIO_MAX732X
587 tristate "MAX7319, MAX7320-7327 I2C Port Expanders" 604 tristate "MAX7319, MAX7320-7327 I2C Port Expanders"
588 depends on I2C
589 help 605 help
590 Say yes here to support the MAX7319, MAX7320-7327 series of I2C 606 Say yes here to support the MAX7319, MAX7320-7327 series of I2C
591 Port Expanders. Each IO port on these chips has a fixed role of 607 Port Expanders. Each IO port on these chips has a fixed role of
@@ -618,7 +634,6 @@ config GPIO_MC9S08DZ60
618 634
619config GPIO_PCA953X 635config GPIO_PCA953X
620 tristate "PCA95[357]x, PCA9698, TCA64xx, and MAX7310 I/O ports" 636 tristate "PCA95[357]x, PCA9698, TCA64xx, and MAX7310 I/O ports"
621 depends on I2C
622 help 637 help
623 Say yes here to provide access to several register-oriented 638 Say yes here to provide access to several register-oriented
624 SMBus I/O expanders, made mostly by NXP or TI. Compatible 639 SMBus I/O expanders, made mostly by NXP or TI. Compatible
@@ -646,7 +661,6 @@ config GPIO_PCA953X_IRQ
646 661
647config GPIO_PCF857X 662config GPIO_PCF857X
648 tristate "PCF857x, PCA{85,96}7x, and MAX732[89] I2C GPIO expanders" 663 tristate "PCF857x, PCA{85,96}7x, and MAX732[89] I2C GPIO expanders"
649 depends on I2C
650 select GPIOLIB_IRQCHIP 664 select GPIOLIB_IRQCHIP
651 select IRQ_DOMAIN 665 select IRQ_DOMAIN
652 help 666 help
@@ -976,7 +990,7 @@ menu "SPI GPIO expanders"
976 990
977config GPIO_74X164 991config GPIO_74X164
978 tristate "74x164 serial-in/parallel-out 8-bits shift register" 992 tristate "74x164 serial-in/parallel-out 8-bits shift register"
979 depends on SPI_MASTER && OF 993 depends on OF
980 help 994 help
981 Driver for 74x164 compatible serial-in/parallel-out 8-outputs 995 Driver for 74x164 compatible serial-in/parallel-out 8-outputs
982 shift registers. This driver can be used to provide access 996 shift registers. This driver can be used to provide access
@@ -984,32 +998,28 @@ config GPIO_74X164
984 998
985config GPIO_MAX7301 999config GPIO_MAX7301
986 tristate "Maxim MAX7301 GPIO expander" 1000 tristate "Maxim MAX7301 GPIO expander"
987 depends on SPI_MASTER
988 select GPIO_MAX730X 1001 select GPIO_MAX730X
989 help 1002 help
990 GPIO driver for Maxim MAX7301 SPI-based GPIO expander. 1003 GPIO driver for Maxim MAX7301 SPI-based GPIO expander.
991 1004
992config GPIO_MCP23S08
993 tristate "Microchip MCP23xxx I/O expander"
994 depends on (SPI_MASTER && !I2C) || I2C
995 help
996 SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
997 I/O expanders.
998 This provides a GPIO interface supporting inputs and outputs.
999 The I2C versions of the chips can be used as interrupt-controller.
1000
1001config GPIO_MC33880 1005config GPIO_MC33880
1002 tristate "Freescale MC33880 high-side/low-side switch" 1006 tristate "Freescale MC33880 high-side/low-side switch"
1003 depends on SPI_MASTER
1004 help 1007 help
1005 SPI driver for Freescale MC33880 high-side/low-side switch. 1008 SPI driver for Freescale MC33880 high-side/low-side switch.
1006 This provides GPIO interface supporting inputs and outputs. 1009 This provides GPIO interface supporting inputs and outputs.
1007 1010
1008config GPIO_ZX 1011endmenu
1009 bool "ZTE ZX GPIO support" 1012
1010 select GPIOLIB_IRQCHIP 1013menu "SPI or I2C GPIO expanders"
1014 depends on (SPI_MASTER && !I2C) || I2C
1015
1016config GPIO_MCP23S08
1017 tristate "Microchip MCP23xxx I/O expander"
1011 help 1018 help
1012 Say yes here to support the GPIO device on ZTE ZX SoCs. 1019 SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
1020 I/O expanders.
1021 This provides a GPIO interface supporting inputs and outputs.
1022 The I2C versions of the chips can be used as interrupt-controller.
1013 1023
1014endmenu 1024endmenu
1015 1025
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index f79a7c482a99..986dbd838cea 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
12# Device drivers. Generally keep list sorted alphabetically 12# Device drivers. Generally keep list sorted alphabetically
13obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o 13obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
14 14
15obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
15obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o 16obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
16obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o 17obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
17obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o 18obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
19obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o 20obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
20obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o 21obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
21obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o 22obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
23obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
22obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o 24obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
23obj-$(CONFIG_ATH79) += gpio-ath79.o 25obj-$(CONFIG_ATH79) += gpio-ath79.o
24obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o 26obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
@@ -40,7 +42,7 @@ obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
40obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o 42obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
41obj-$(CONFIG_GPIO_ICH) += gpio-ich.o 43obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
42obj-$(CONFIG_GPIO_IOP) += gpio-iop.o 44obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
43obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o 45obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
44obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o 46obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
45obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o 47obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
46obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o 48obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
@@ -64,7 +66,6 @@ obj-$(CONFIG_GPIO_MOXART) += gpio-moxart.o
64obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o 66obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
65obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o 67obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
66obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o 68obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
67obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
68obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o 69obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
69obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o 70obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
70obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o 71obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
new file mode 100644
index 000000000000..5400d7d4d8fd
--- /dev/null
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -0,0 +1,216 @@
1/*
2 * GPIO driver for the ACCES 104-IDIO-16 family
3 * Copyright (C) 2015 William Breathitt Gray
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 */
14#include <linux/device.h>
15#include <linux/errno.h>
16#include <linux/gpio/driver.h>
17#include <linux/io.h>
18#include <linux/ioport.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/platform_device.h>
23#include <linux/spinlock.h>
24
25static unsigned idio_16_base;
26module_param(idio_16_base, uint, 0);
27MODULE_PARM_DESC(idio_16_base, "ACCES 104-IDIO-16 base address");
28
29/**
30 * struct idio_16_gpio - GPIO device private data structure
31 * @chip: instance of the gpio_chip
32 * @lock: synchronization lock to prevent gpio_set race conditions
33 * @base: base port address of the GPIO device
34 * @extent: extent of port address region of the GPIO device
35 * @out_state: output bits state
36 */
37struct idio_16_gpio {
38 struct gpio_chip chip;
39 spinlock_t lock;
40 unsigned base;
41 unsigned extent;
42 unsigned out_state;
43};
44
45static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
46{
47 if (offset > 15)
48 return 1;
49
50 return 0;
51}
52
53static int idio_16_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
54{
55 return 0;
56}
57
58static int idio_16_gpio_direction_output(struct gpio_chip *chip,
59 unsigned offset, int value)
60{
61 chip->set(chip, offset, value);
62 return 0;
63}
64
65static struct idio_16_gpio *to_idio16gpio(struct gpio_chip *gc)
66{
67 return container_of(gc, struct idio_16_gpio, chip);
68}
69
70static int idio_16_gpio_get(struct gpio_chip *chip, unsigned offset)
71{
72 struct idio_16_gpio *const idio16gpio = to_idio16gpio(chip);
73 const unsigned BIT_MASK = 1U << (offset-16);
74
75 if (offset < 16)
76 return -EINVAL;
77
78 if (offset < 24)
79 return !!(inb(idio16gpio->base + 1) & BIT_MASK);
80
81 return !!(inb(idio16gpio->base + 5) & (BIT_MASK>>8));
82}
83
84static void idio_16_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
85{
86 struct idio_16_gpio *const idio16gpio = to_idio16gpio(chip);
87 const unsigned BIT_MASK = 1U << offset;
88 unsigned long flags;
89
90 if (offset > 15)
91 return;
92
93 spin_lock_irqsave(&idio16gpio->lock, flags);
94
95 if (value)
96 idio16gpio->out_state |= BIT_MASK;
97 else
98 idio16gpio->out_state &= ~BIT_MASK;
99
100 if (offset > 7)
101 outb(idio16gpio->out_state >> 8, idio16gpio->base + 4);
102 else
103 outb(idio16gpio->out_state, idio16gpio->base);
104
105 spin_unlock_irqrestore(&idio16gpio->lock, flags);
106}
107
108static int __init idio_16_probe(struct platform_device *pdev)
109{
110 struct device *dev = &pdev->dev;
111 struct idio_16_gpio *idio16gpio;
112 int err;
113
114 const unsigned BASE = idio_16_base;
115 const unsigned EXTENT = 8;
116 const char *const NAME = dev_name(dev);
117
118 idio16gpio = devm_kzalloc(dev, sizeof(*idio16gpio), GFP_KERNEL);
119 if (!idio16gpio)
120 return -ENOMEM;
121
122 if (!request_region(BASE, EXTENT, NAME)) {
123 dev_err(dev, "Unable to lock %s port addresses (0x%X-0x%X)\n",
124 NAME, BASE, BASE + EXTENT);
125 err = -EBUSY;
126 goto err_lock_io_port;
127 }
128
129 idio16gpio->chip.label = NAME;
130 idio16gpio->chip.dev = dev;
131 idio16gpio->chip.owner = THIS_MODULE;
132 idio16gpio->chip.base = -1;
133 idio16gpio->chip.ngpio = 32;
134 idio16gpio->chip.get_direction = idio_16_gpio_get_direction;
135 idio16gpio->chip.direction_input = idio_16_gpio_direction_input;
136 idio16gpio->chip.direction_output = idio_16_gpio_direction_output;
137 idio16gpio->chip.get = idio_16_gpio_get;
138 idio16gpio->chip.set = idio_16_gpio_set;
139 idio16gpio->base = BASE;
140 idio16gpio->extent = EXTENT;
141 idio16gpio->out_state = 0xFFFF;
142
143 spin_lock_init(&idio16gpio->lock);
144
145 dev_set_drvdata(dev, idio16gpio);
146
147 err = gpiochip_add(&idio16gpio->chip);
148 if (err) {
149 dev_err(dev, "GPIO registering failed (%d)\n", err);
150 goto err_gpio_register;
151 }
152
153 return 0;
154
155err_gpio_register:
156 release_region(BASE, EXTENT);
157err_lock_io_port:
158 return err;
159}
160
161static int idio_16_remove(struct platform_device *pdev)
162{
163 struct idio_16_gpio *const idio16gpio = platform_get_drvdata(pdev);
164
165 gpiochip_remove(&idio16gpio->chip);
166 release_region(idio16gpio->base, idio16gpio->extent);
167
168 return 0;
169}
170
171static struct platform_device *idio_16_device;
172
173static struct platform_driver idio_16_driver = {
174 .driver = {
175 .name = "104-idio-16"
176 },
177 .remove = idio_16_remove
178};
179
180static void __exit idio_16_exit(void)
181{
182 platform_device_unregister(idio_16_device);
183 platform_driver_unregister(&idio_16_driver);
184}
185
186static int __init idio_16_init(void)
187{
188 int err;
189
190 idio_16_device = platform_device_alloc(idio_16_driver.driver.name, -1);
191 if (!idio_16_device)
192 return -ENOMEM;
193
194 err = platform_device_add(idio_16_device);
195 if (err)
196 goto err_platform_device;
197
198 err = platform_driver_probe(&idio_16_driver, idio_16_probe);
199 if (err)
200 goto err_platform_driver;
201
202 return 0;
203
204err_platform_driver:
205 platform_device_del(idio_16_device);
206err_platform_device:
207 platform_device_put(idio_16_device);
208 return err;
209}
210
211module_init(idio_16_init);
212module_exit(idio_16_exit);
213
214MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
215MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
216MODULE_LICENSE("GPL");
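The 104-IDIO-16 driver above registers its lines with a dynamic base (chip.base = -1), so once the module is loaded with an idio_16_base= parameter the lines are reachable through the legacy sysfs GPIO interface. The snippet below is a minimal, hypothetical user-space sketch, not part of the patch; the global GPIO number 480 is an assumption and would have to be read from /sys/class/gpio/gpiochipN/base on a real system.

/*
 * Hypothetical user-space sketch: drive output line 0 of the 104-IDIO-16
 * through the legacy sysfs GPIO interface.  GPIO_NUM is an assumed
 * dynamically assigned number, not taken from the patch.
 */
#include <stdio.h>
#include <unistd.h>

#define GPIO_NUM "480"	/* assumption: dynamic base assigned to the chip */

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_str("/sys/class/gpio/export", GPIO_NUM);
	write_str("/sys/class/gpio/gpio" GPIO_NUM "/direction", "out");
	write_str("/sys/class/gpio/gpio" GPIO_NUM "/value", "1");	/* drive high */
	sleep(1);
	write_str("/sys/class/gpio/gpio" GPIO_NUM "/value", "0");	/* drive low */
	write_str("/sys/class/gpio/unexport", GPIO_NUM);
	return 0;
}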
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 1b44941574fa..3e6661bab54a 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -42,6 +42,11 @@ struct altera_gpio_chip {
42 int mapped_irq; 42 int mapped_irq;
43}; 43};
44 44
45static struct altera_gpio_chip *to_altera(struct gpio_chip *gc)
46{
47 return container_of(gc, struct altera_gpio_chip, mmchip.gc);
48}
49
45static void altera_gpio_irq_unmask(struct irq_data *d) 50static void altera_gpio_irq_unmask(struct irq_data *d)
46{ 51{
47 struct altera_gpio_chip *altera_gc; 52 struct altera_gpio_chip *altera_gc;
@@ -49,7 +54,7 @@ static void altera_gpio_irq_unmask(struct irq_data *d)
49 unsigned long flags; 54 unsigned long flags;
50 u32 intmask; 55 u32 intmask;
51 56
52 altera_gc = irq_data_get_irq_chip_data(d); 57 altera_gc = to_altera(irq_data_get_irq_chip_data(d));
53 mm_gc = &altera_gc->mmchip; 58 mm_gc = &altera_gc->mmchip;
54 59
55 spin_lock_irqsave(&altera_gc->gpio_lock, flags); 60 spin_lock_irqsave(&altera_gc->gpio_lock, flags);
@@ -67,7 +72,7 @@ static void altera_gpio_irq_mask(struct irq_data *d)
67 unsigned long flags; 72 unsigned long flags;
68 u32 intmask; 73 u32 intmask;
69 74
70 altera_gc = irq_data_get_irq_chip_data(d); 75 altera_gc = to_altera(irq_data_get_irq_chip_data(d));
71 mm_gc = &altera_gc->mmchip; 76 mm_gc = &altera_gc->mmchip;
72 77
73 spin_lock_irqsave(&altera_gc->gpio_lock, flags); 78 spin_lock_irqsave(&altera_gc->gpio_lock, flags);
@@ -87,7 +92,7 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
87{ 92{
88 struct altera_gpio_chip *altera_gc; 93 struct altera_gpio_chip *altera_gc;
89 94
90 altera_gc = irq_data_get_irq_chip_data(d); 95 altera_gc = to_altera(irq_data_get_irq_chip_data(d));
91 96
92 if (type == IRQ_TYPE_NONE) 97 if (type == IRQ_TYPE_NONE)
93 return 0; 98 return 0;
@@ -210,7 +215,7 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
210 unsigned long status; 215 unsigned long status;
211 int i; 216 int i;
212 217
213 altera_gc = irq_desc_get_handler_data(desc); 218 altera_gc = to_altera(irq_desc_get_handler_data(desc));
214 chip = irq_desc_get_chip(desc); 219 chip = irq_desc_get_chip(desc);
215 mm_gc = &altera_gc->mmchip; 220 mm_gc = &altera_gc->mmchip;
216 irqdomain = altera_gc->mmchip.gc.irqdomain; 221 irqdomain = altera_gc->mmchip.gc.irqdomain;
@@ -239,7 +244,7 @@ static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
239 unsigned long status; 244 unsigned long status;
240 int i; 245 int i;
241 246
242 altera_gc = irq_desc_get_handler_data(desc); 247 altera_gc = to_altera(irq_desc_get_handler_data(desc));
243 chip = irq_desc_get_chip(desc); 248 chip = irq_desc_get_chip(desc);
244 mm_gc = &altera_gc->mmchip; 249 mm_gc = &altera_gc->mmchip;
245 irqdomain = altera_gc->mmchip.gc.irqdomain; 250 irqdomain = altera_gc->mmchip.gc.irqdomain;
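The to_altera() helper above (like the similar to_etraxfs() helper further down in this patch) follows the usual gpiolib pattern of embedding the gpio_chip in a driver-private structure and recovering the private data with container_of(). A minimal sketch of that pattern follows; struct foo_gpio, to_foo() and foo_gpio_get() are purely illustrative names, not taken from the patch.

/*
 * Minimal sketch of the container_of() pattern used by these helpers.
 */
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct foo_gpio {
	struct gpio_chip chip;		/* embedded, not a pointer */
	void __iomem *regs;
};

static struct foo_gpio *to_foo(struct gpio_chip *gc)
{
	return container_of(gc, struct foo_gpio, chip);
}

static int foo_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	struct foo_gpio *foo = to_foo(gc);	/* recover private data */

	return !!(readl(foo->regs) & BIT(offset));
}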
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
new file mode 100644
index 000000000000..cbbb966d4fc0
--- /dev/null
+++ b/drivers/gpio/gpio-amdpt.c
@@ -0,0 +1,261 @@
1/*
2 * AMD Promontory GPIO driver
3 *
4 * Copyright (C) 2015 ASMedia Technology Inc.
5 * Author: YD Tseng <yd_tseng@asmedia.com.tw>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/gpio/driver.h>
15#include <linux/spinlock.h>
16#include <linux/acpi.h>
17#include <linux/platform_device.h>
18
19#define PT_TOTAL_GPIO 8
20
21/* PCI-E MMIO register offsets */
22#define PT_DIRECTION_REG 0x00
23#define PT_INPUTDATA_REG 0x04
24#define PT_OUTPUTDATA_REG 0x08
25#define PT_CLOCKRATE_REG 0x0C
26#define PT_SYNC_REG 0x28
27
28struct pt_gpio_chip {
29 struct gpio_chip gc;
30 void __iomem *reg_base;
31 spinlock_t lock;
32};
33
34#define to_pt_gpio(c) container_of(c, struct pt_gpio_chip, gc)
35
36static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
37{
38 struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
39 unsigned long flags;
40 u32 using_pins;
41
42 dev_dbg(gc->dev, "pt_gpio_request offset=%x\n", offset);
43
44 spin_lock_irqsave(&pt_gpio->lock, flags);
45
46 using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
47 if (using_pins & BIT(offset)) {
48 dev_warn(gc->dev, "PT GPIO pin %x reconfigured\n",
49 offset);
50 spin_unlock_irqrestore(&pt_gpio->lock, flags);
51 return -EINVAL;
52 }
53
54 writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
55
56 spin_unlock_irqrestore(&pt_gpio->lock, flags);
57
58 return 0;
59}
60
61static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
62{
63 struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
64 unsigned long flags;
65 u32 using_pins;
66
67 spin_lock_irqsave(&pt_gpio->lock, flags);
68
69 using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
70 using_pins &= ~BIT(offset);
71 writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
72
73 spin_unlock_irqrestore(&pt_gpio->lock, flags);
74
75 dev_dbg(gc->dev, "pt_gpio_free offset=%x\n", offset);
76}
77
78static void pt_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
79{
80 struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
81 unsigned long flags;
82 u32 data;
83
84 dev_dbg(gc->dev, "pt_gpio_set_value offset=%x, value=%x\n",
85 offset, value);
86
87 spin_lock_irqsave(&pt_gpio->lock, flags);
88
89 data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
90 data &= ~BIT(offset);
91 if (value)
92 data |= BIT(offset);
93 writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
94
95 spin_unlock_irqrestore(&pt_gpio->lock, flags);
96}
97
98static int pt_gpio_get_value(struct gpio_chip *gc, unsigned offset)
99{
100 struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
101 unsigned long flags;
102 u32 data;
103
104 spin_lock_irqsave(&pt_gpio->lock, flags);
105
106 data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
107
108	/* pin is configured as output */
109 if (data & BIT(offset))
110 data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
111	else	/* pin is configured as input */
112 data = readl(pt_gpio->reg_base + PT_INPUTDATA_REG);
113
114 spin_unlock_irqrestore(&pt_gpio->lock, flags);
115
116 data >>= offset;
117 data &= 1;
118
119 dev_dbg(gc->dev, "pt_gpio_get_value offset=%x, value=%x\n",
120 offset, data);
121
122 return data;
123}
124
125static int pt_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
126{
127 struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
128 unsigned long flags;
129 u32 data;
130
131	dev_dbg(gc->dev, "pt_gpio_direction_input offset=%x\n", offset);
132
133 spin_lock_irqsave(&pt_gpio->lock, flags);
134
135 data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
136 data &= ~BIT(offset);
137 writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
138
139 spin_unlock_irqrestore(&pt_gpio->lock, flags);
140
141 return 0;
142}
143
144static int pt_gpio_direction_output(struct gpio_chip *gc,
145 unsigned offset, int value)
146{
147 struct pt_gpio_chip *pt_gpio = to_pt_gpio(gc);
148 unsigned long flags;
149 u32 data;
150
151 dev_dbg(gc->dev, "pt_gpio_direction_output offset=%x, value=%x\n",
152 offset, value);
153
154 spin_lock_irqsave(&pt_gpio->lock, flags);
155
156 data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
157 if (value)
158 data |= BIT(offset);
159 else
160 data &= ~BIT(offset);
161 writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
162
163 data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
164 data |= BIT(offset);
165 writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
166
167 spin_unlock_irqrestore(&pt_gpio->lock, flags);
168
169 return 0;
170}
171
172static int pt_gpio_probe(struct platform_device *pdev)
173{
174 struct device *dev = &pdev->dev;
175 struct acpi_device *acpi_dev;
176 acpi_handle handle = ACPI_HANDLE(dev);
177 struct pt_gpio_chip *pt_gpio;
178 struct resource *res_mem;
179 int ret = 0;
180
181 if (acpi_bus_get_device(handle, &acpi_dev)) {
182 dev_err(dev, "PT GPIO device node not found\n");
183 return -ENODEV;
184 }
185
186 pt_gpio = devm_kzalloc(dev, sizeof(struct pt_gpio_chip), GFP_KERNEL);
187 if (!pt_gpio)
188 return -ENOMEM;
189
190 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
191 if (!res_mem) {
192 dev_err(&pdev->dev, "Failed to get MMIO resource for PT GPIO.\n");
193 return -EINVAL;
194 }
195 pt_gpio->reg_base = devm_ioremap_resource(dev, res_mem);
196 if (IS_ERR(pt_gpio->reg_base)) {
197 dev_err(&pdev->dev, "Failed to map MMIO resource for PT GPIO.\n");
198 return PTR_ERR(pt_gpio->reg_base);
199 }
200
201 spin_lock_init(&pt_gpio->lock);
202
203 pt_gpio->gc.label = pdev->name;
204 pt_gpio->gc.owner = THIS_MODULE;
205 pt_gpio->gc.dev = dev;
206 pt_gpio->gc.request = pt_gpio_request;
207 pt_gpio->gc.free = pt_gpio_free;
208 pt_gpio->gc.direction_input = pt_gpio_direction_input;
209 pt_gpio->gc.direction_output = pt_gpio_direction_output;
210 pt_gpio->gc.get = pt_gpio_get_value;
211 pt_gpio->gc.set = pt_gpio_set_value;
212 pt_gpio->gc.base = -1;
213 pt_gpio->gc.ngpio = PT_TOTAL_GPIO;
214#if defined(CONFIG_OF_GPIO)
215 pt_gpio->gc.of_node = pdev->dev.of_node;
216#endif
217 ret = gpiochip_add(&pt_gpio->gc);
218 if (ret) {
219 dev_err(&pdev->dev, "Failed to register GPIO lib\n");
220 return ret;
221 }
222
223 platform_set_drvdata(pdev, pt_gpio);
224
225 /* initialize register setting */
226 writel(0, pt_gpio->reg_base + PT_SYNC_REG);
227 writel(0, pt_gpio->reg_base + PT_CLOCKRATE_REG);
228
229 dev_dbg(&pdev->dev, "PT GPIO driver loaded\n");
230 return ret;
231}
232
233static int pt_gpio_remove(struct platform_device *pdev)
234{
235 struct pt_gpio_chip *pt_gpio = platform_get_drvdata(pdev);
236
237 gpiochip_remove(&pt_gpio->gc);
238
239 return 0;
240}
241
242static const struct acpi_device_id pt_gpio_acpi_match[] = {
243 { "AMDF030", 0 },
244 { },
245};
246MODULE_DEVICE_TABLE(acpi, pt_gpio_acpi_match);
247
248static struct platform_driver pt_gpio_driver = {
249 .driver = {
250 .name = "pt-gpio",
251 .acpi_match_table = ACPI_PTR(pt_gpio_acpi_match),
252 },
253 .probe = pt_gpio_probe,
254 .remove = pt_gpio_remove,
255};
256
257module_platform_driver(pt_gpio_driver);
258
259MODULE_LICENSE("GPL");
260MODULE_AUTHOR("YD Tseng <yd_tseng@asmedia.com.tw>");
261MODULE_DESCRIPTION("AMD Promontory GPIO Driver");
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 052fbc8fdaaa..ca002739616a 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -118,6 +118,8 @@ static int arizona_gpio_probe(struct platform_device *pdev)
118 case WM5110: 118 case WM5110:
119 case WM8280: 119 case WM8280:
120 case WM8997: 120 case WM8997:
121 case WM8998:
122 case WM1814:
121 arizona_gpio->gpio_chip.ngpio = 5; 123 arizona_gpio->gpio_chip.ngpio = 5;
122 break; 124 break;
123 default: 125 default:
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 03b995304ad6..e5827a56ff3b 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -12,61 +12,51 @@
12 * by the Free Software Foundation. 12 * by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/kernel.h> 15#include <linux/gpio/driver.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/spinlock.h>
20#include <linux/io.h>
21#include <linux/ioport.h>
22#include <linux/gpio.h>
23#include <linux/platform_data/gpio-ath79.h> 16#include <linux/platform_data/gpio-ath79.h>
24#include <linux/of_device.h> 17#include <linux/of_device.h>
25 18
26#include <asm/mach-ath79/ar71xx_regs.h> 19#include <asm/mach-ath79/ar71xx_regs.h>
27 20
28static void __iomem *ath79_gpio_base; 21struct ath79_gpio_ctrl {
29static u32 ath79_gpio_count; 22 struct gpio_chip chip;
30static DEFINE_SPINLOCK(ath79_gpio_lock); 23 void __iomem *base;
24 spinlock_t lock;
25};
26
27#define to_ath79_gpio_ctrl(c) container_of(c, struct ath79_gpio_ctrl, chip)
31 28
32static void __ath79_gpio_set_value(unsigned gpio, int value) 29static void ath79_gpio_set_value(struct gpio_chip *chip,
30 unsigned gpio, int value)
33{ 31{
34 void __iomem *base = ath79_gpio_base; 32 struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
35 33
36 if (value) 34 if (value)
37 __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET); 35 __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_SET);
38 else 36 else
39 __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR); 37 __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_CLEAR);
40} 38}
41 39
42static int __ath79_gpio_get_value(unsigned gpio) 40static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
43{ 41{
44 return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1; 42 struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
45}
46 43
47static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset) 44 return (__raw_readl(ctrl->base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
48{
49 return __ath79_gpio_get_value(offset);
50}
51
52static void ath79_gpio_set_value(struct gpio_chip *chip,
53 unsigned offset, int value)
54{
55 __ath79_gpio_set_value(offset, value);
56} 45}
57 46
58static int ath79_gpio_direction_input(struct gpio_chip *chip, 47static int ath79_gpio_direction_input(struct gpio_chip *chip,
59 unsigned offset) 48 unsigned offset)
60{ 49{
61 void __iomem *base = ath79_gpio_base; 50 struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
62 unsigned long flags; 51 unsigned long flags;
63 52
64 spin_lock_irqsave(&ath79_gpio_lock, flags); 53 spin_lock_irqsave(&ctrl->lock, flags);
65 54
66 __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset), 55 __raw_writel(
67 base + AR71XX_GPIO_REG_OE); 56 __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
57 ctrl->base + AR71XX_GPIO_REG_OE);
68 58
69 spin_unlock_irqrestore(&ath79_gpio_lock, flags); 59 spin_unlock_irqrestore(&ctrl->lock, flags);
70 60
71 return 0; 61 return 0;
72} 62}
@@ -74,35 +64,37 @@ static int ath79_gpio_direction_input(struct gpio_chip *chip,
74static int ath79_gpio_direction_output(struct gpio_chip *chip, 64static int ath79_gpio_direction_output(struct gpio_chip *chip,
75 unsigned offset, int value) 65 unsigned offset, int value)
76{ 66{
77 void __iomem *base = ath79_gpio_base; 67 struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
78 unsigned long flags; 68 unsigned long flags;
79 69
80 spin_lock_irqsave(&ath79_gpio_lock, flags); 70 spin_lock_irqsave(&ctrl->lock, flags);
81 71
82 if (value) 72 if (value)
83 __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET); 73 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
84 else 74 else
85 __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR); 75 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
86 76
87 __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset), 77 __raw_writel(
88 base + AR71XX_GPIO_REG_OE); 78 __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
79 ctrl->base + AR71XX_GPIO_REG_OE);
89 80
90 spin_unlock_irqrestore(&ath79_gpio_lock, flags); 81 spin_unlock_irqrestore(&ctrl->lock, flags);
91 82
92 return 0; 83 return 0;
93} 84}
94 85
95static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 86static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
96{ 87{
97 void __iomem *base = ath79_gpio_base; 88 struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
98 unsigned long flags; 89 unsigned long flags;
99 90
100 spin_lock_irqsave(&ath79_gpio_lock, flags); 91 spin_lock_irqsave(&ctrl->lock, flags);
101 92
102 __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset), 93 __raw_writel(
103 base + AR71XX_GPIO_REG_OE); 94 __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
95 ctrl->base + AR71XX_GPIO_REG_OE);
104 96
105 spin_unlock_irqrestore(&ath79_gpio_lock, flags); 97 spin_unlock_irqrestore(&ctrl->lock, flags);
106 98
107 return 0; 99 return 0;
108} 100}
@@ -110,25 +102,26 @@ static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
110static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset, 102static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
111 int value) 103 int value)
112{ 104{
113 void __iomem *base = ath79_gpio_base; 105 struct ath79_gpio_ctrl *ctrl = to_ath79_gpio_ctrl(chip);
114 unsigned long flags; 106 unsigned long flags;
115 107
116 spin_lock_irqsave(&ath79_gpio_lock, flags); 108 spin_lock_irqsave(&ctrl->lock, flags);
117 109
118 if (value) 110 if (value)
119 __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET); 111 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
120 else 112 else
121 __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR); 113 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
122 114
123 __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset), 115 __raw_writel(
124		base + AR71XX_GPIO_REG_OE);	116		__raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
117 ctrl->base + AR71XX_GPIO_REG_OE);
125 118
126 spin_unlock_irqrestore(&ath79_gpio_lock, flags); 119 spin_unlock_irqrestore(&ctrl->lock, flags);
127 120
128 return 0; 121 return 0;
129} 122}
130 123
131static struct gpio_chip ath79_gpio_chip = { 124static const struct gpio_chip ath79_gpio_chip = {
132 .label = "ath79", 125 .label = "ath79",
133 .get = ath79_gpio_get_value, 126 .get = ath79_gpio_get_value,
134 .set = ath79_gpio_set_value, 127 .set = ath79_gpio_set_value,
@@ -147,10 +140,16 @@ static int ath79_gpio_probe(struct platform_device *pdev)
147{ 140{
148 struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data; 141 struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
149 struct device_node *np = pdev->dev.of_node; 142 struct device_node *np = pdev->dev.of_node;
143 struct ath79_gpio_ctrl *ctrl;
150 struct resource *res; 144 struct resource *res;
145 u32 ath79_gpio_count;
151 bool oe_inverted; 146 bool oe_inverted;
152 int err; 147 int err;
153 148
149 ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
150 if (!ctrl)
151 return -ENOMEM;
152
154 if (np) { 153 if (np) {
155 err = of_property_read_u32(np, "ngpios", &ath79_gpio_count); 154 err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
156 if (err) { 155 if (err) {
@@ -171,19 +170,21 @@ static int ath79_gpio_probe(struct platform_device *pdev)
171 } 170 }
172 171
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 172 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 ath79_gpio_base = devm_ioremap_nocache( 173 ctrl->base = devm_ioremap_nocache(
175 &pdev->dev, res->start, resource_size(res)); 174 &pdev->dev, res->start, resource_size(res));
176 if (!ath79_gpio_base) 175 if (!ctrl->base)
177 return -ENOMEM; 176 return -ENOMEM;
178 177
179 ath79_gpio_chip.dev = &pdev->dev; 178 spin_lock_init(&ctrl->lock);
180 ath79_gpio_chip.ngpio = ath79_gpio_count; 179 memcpy(&ctrl->chip, &ath79_gpio_chip, sizeof(ctrl->chip));
180 ctrl->chip.dev = &pdev->dev;
181 ctrl->chip.ngpio = ath79_gpio_count;
181 if (oe_inverted) { 182 if (oe_inverted) {
182 ath79_gpio_chip.direction_input = ar934x_gpio_direction_input; 183 ctrl->chip.direction_input = ar934x_gpio_direction_input;
183 ath79_gpio_chip.direction_output = ar934x_gpio_direction_output; 184 ctrl->chip.direction_output = ar934x_gpio_direction_output;
184 } 185 }
185 186
186 err = gpiochip_add(&ath79_gpio_chip); 187 err = gpiochip_add(&ctrl->chip);
187 if (err) { 188 if (err) {
188 dev_err(&pdev->dev, 189 dev_err(&pdev->dev,
189 "cannot add AR71xx GPIO chip, error=%d", err); 190 "cannot add AR71xx GPIO chip, error=%d", err);
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 2ffcd9fdd1f2..5c15dd12172d 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -176,6 +176,11 @@ static const struct etraxfs_gpio_info etraxfs_gpio_artpec3 = {
176 .rw_intr_pins = ARTPEC3_rw_intr_pins, 176 .rw_intr_pins = ARTPEC3_rw_intr_pins,
177}; 177};
178 178
179static struct etraxfs_gpio_chip *to_etraxfs(struct gpio_chip *gc)
180{
181 return container_of(gc, struct etraxfs_gpio_chip, bgc.gc);
182}
183
179static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc) 184static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc)
180{ 185{
181 return gc->label[0] - 'A'; 186 return gc->label[0] - 'A';
@@ -220,7 +225,8 @@ static unsigned int etraxfs_gpio_to_group_pin(struct etraxfs_gpio_chip *chip,
220 225
221static void etraxfs_gpio_irq_ack(struct irq_data *d) 226static void etraxfs_gpio_irq_ack(struct irq_data *d)
222{ 227{
223 struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); 228 struct etraxfs_gpio_chip *chip =
229 to_etraxfs(irq_data_get_irq_chip_data(d));
224 struct etraxfs_gpio_block *block = chip->block; 230 struct etraxfs_gpio_block *block = chip->block;
225 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); 231 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
226 232
@@ -229,7 +235,8 @@ static void etraxfs_gpio_irq_ack(struct irq_data *d)
229 235
230static void etraxfs_gpio_irq_mask(struct irq_data *d) 236static void etraxfs_gpio_irq_mask(struct irq_data *d)
231{ 237{
232 struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); 238 struct etraxfs_gpio_chip *chip =
239 to_etraxfs(irq_data_get_irq_chip_data(d));
233 struct etraxfs_gpio_block *block = chip->block; 240 struct etraxfs_gpio_block *block = chip->block;
234 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); 241 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
235 242
@@ -241,7 +248,8 @@ static void etraxfs_gpio_irq_mask(struct irq_data *d)
241 248
242static void etraxfs_gpio_irq_unmask(struct irq_data *d) 249static void etraxfs_gpio_irq_unmask(struct irq_data *d)
243{ 250{
244 struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); 251 struct etraxfs_gpio_chip *chip =
252 to_etraxfs(irq_data_get_irq_chip_data(d));
245 struct etraxfs_gpio_block *block = chip->block; 253 struct etraxfs_gpio_block *block = chip->block;
246 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); 254 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
247 255
@@ -253,7 +261,8 @@ static void etraxfs_gpio_irq_unmask(struct irq_data *d)
253 261
254static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type) 262static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
255{ 263{
256 struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); 264 struct etraxfs_gpio_chip *chip =
265 to_etraxfs(irq_data_get_irq_chip_data(d));
257 struct etraxfs_gpio_block *block = chip->block; 266 struct etraxfs_gpio_block *block = chip->block;
258 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); 267 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
259 u32 cfg; 268 u32 cfg;
@@ -289,7 +298,8 @@ static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
289 298
290static int etraxfs_gpio_irq_request_resources(struct irq_data *d) 299static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
291{ 300{
292 struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); 301 struct etraxfs_gpio_chip *chip =
302 to_etraxfs(irq_data_get_irq_chip_data(d));
293 struct etraxfs_gpio_block *block = chip->block; 303 struct etraxfs_gpio_block *block = chip->block;
294 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); 304 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
295 int ret = -EBUSY; 305 int ret = -EBUSY;
@@ -319,7 +329,8 @@ out:
319 329
320static void etraxfs_gpio_irq_release_resources(struct irq_data *d) 330static void etraxfs_gpio_irq_release_resources(struct irq_data *d)
321{ 331{
322 struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); 332 struct etraxfs_gpio_chip *chip =
333 to_etraxfs(irq_data_get_irq_chip_data(d));
323 struct etraxfs_gpio_block *block = chip->block; 334 struct etraxfs_gpio_block *block = chip->block;
324 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); 335 unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
325 336
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index a3f07537fe62..bd5193c67a9c 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -579,40 +579,20 @@ EXPORT_SYMBOL_GPL(bgpio_init);
579 579
580static void __iomem *bgpio_map(struct platform_device *pdev, 580static void __iomem *bgpio_map(struct platform_device *pdev,
581 const char *name, 581 const char *name,
582 resource_size_t sane_sz, 582 resource_size_t sane_sz)
583 int *err)
584{ 583{
585 struct device *dev = &pdev->dev;
586 struct resource *r; 584 struct resource *r;
587 resource_size_t start;
588 resource_size_t sz; 585 resource_size_t sz;
589 void __iomem *ret;
590
591 *err = 0;
592 586
593 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); 587 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
594 if (!r) 588 if (!r)
595 return NULL; 589 return NULL;
596 590
597 sz = resource_size(r); 591 sz = resource_size(r);
598 if (sz != sane_sz) { 592 if (sz != sane_sz)
599 *err = -EINVAL; 593 return IOMEM_ERR_PTR(-EINVAL);
600 return NULL;
601 }
602
603 start = r->start;
604 if (!devm_request_mem_region(dev, start, sz, r->name)) {
605 *err = -EBUSY;
606 return NULL;
607 }
608
609 ret = devm_ioremap(dev, start, sz);
610 if (!ret) {
611 *err = -ENOMEM;
612 return NULL;
613 }
614 594
615 return ret; 595 return devm_ioremap_resource(&pdev->dev, r);
616} 596}
617 597
618static int bgpio_pdev_probe(struct platform_device *pdev) 598static int bgpio_pdev_probe(struct platform_device *pdev)
@@ -636,25 +616,25 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
636 616
637 sz = resource_size(r); 617 sz = resource_size(r);
638 618
639 dat = bgpio_map(pdev, "dat", sz, &err); 619 dat = bgpio_map(pdev, "dat", sz);
640 if (!dat) 620 if (IS_ERR(dat))
641 return err ? err : -EINVAL; 621 return PTR_ERR(dat);
642 622
643 set = bgpio_map(pdev, "set", sz, &err); 623 set = bgpio_map(pdev, "set", sz);
644 if (err) 624 if (IS_ERR(set))
645 return err; 625 return PTR_ERR(set);
646 626
647 clr = bgpio_map(pdev, "clr", sz, &err); 627 clr = bgpio_map(pdev, "clr", sz);
648 if (err) 628 if (IS_ERR(clr))
649 return err; 629 return PTR_ERR(clr);
650 630
651 dirout = bgpio_map(pdev, "dirout", sz, &err); 631 dirout = bgpio_map(pdev, "dirout", sz);
652 if (err) 632 if (IS_ERR(dirout))
653 return err; 633 return PTR_ERR(dirout);
654 634
655 dirin = bgpio_map(pdev, "dirin", sz, &err); 635 dirin = bgpio_map(pdev, "dirin", sz);
656 if (err) 636 if (IS_ERR(dirin))
657 return err; 637 return PTR_ERR(dirin);
658 638
659 bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL); 639 bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
660 if (!bgc) 640 if (!bgc)
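The simplified bgpio_map() above leans on devm_ioremap_resource(), which requests and maps the region in one call and reports failure through ERR_PTR(), so callers test the result with IS_ERR()/PTR_ERR() rather than through a separate *err output parameter. A short sketch of the same idiom follows; foo_map_reg() is an illustrative name, not part of the patch.

/*
 * Sketch of the devm_ioremap_resource() error-pointer idiom.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_reg(struct platform_device *pdev,
				 const char *name)
{
	struct resource *r;

	/* the register block is optional: NULL means "not wired up" */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!r)
		return NULL;

	/* requests and maps the region; returns ERR_PTR() on failure */
	return devm_ioremap_resource(&pdev->dev, r);
}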
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
new file mode 100644
index 000000000000..21f6f7c0eb34
--- /dev/null
+++ b/drivers/gpio/gpio-it87.c
@@ -0,0 +1,411 @@
1/*
2 * GPIO interface for IT87xx Super I/O chips
3 *
4 * Author: Diego Elio Pettenò <flameeyes@flameeyes.eu>
5 *
6 * Based on it87_wdt.c by Oliver Schuster
7 * gpio-it8761e.c by Denis Turischev
8 * gpio-stmpe.c by Rabin Vincent
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License 2 as published
12 * by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, write to
21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/io.h>
30#include <linux/errno.h>
31#include <linux/ioport.h>
32#include <linux/slab.h>
33#include <linux/gpio.h>
34
35/* Chip Id numbers */
36#define NO_DEV_ID 0xffff
37#define IT8728_ID 0x8728
38#define IT8732_ID 0x8732
39#define IT8761_ID 0x8761
40
41/* IO Ports */
42#define REG 0x2e
43#define VAL 0x2f
44
45/* Logical device Numbers LDN */
46#define GPIO 0x07
47
48/* Configuration Registers and Functions */
49#define LDNREG 0x07
50#define CHIPID 0x20
51#define CHIPREV 0x22
52
53/**
54 * struct it87_gpio - it87-specific GPIO chip
55 * @chip: the underlying gpio_chip structure
56 * @lock: a lock to avoid races between operations
57 * @io_base: base address for gpio ports
58 * @io_size: size of the port range starting from io_base.
59 * @output_base: Super I/O register address for Output Enable register
60 * @simple_base: Super I/O 'Simple I/O' Enable register
61 * @simple_size: Super I/O 'Simple I/O' Enable register size; this is
62 *	required because IT87xx chips might only provide Simple I/O
63 *	switches on a subset of lines, whereas the others keep the
64 *	same status all the time.
65 */
66struct it87_gpio {
67 struct gpio_chip chip;
68 spinlock_t lock;
69 u16 io_base;
70 u16 io_size;
71 u8 output_base;
72 u8 simple_base;
73 u8 simple_size;
74};
75
76static struct it87_gpio it87_gpio_chip = {
77 .lock = __SPIN_LOCK_UNLOCKED(it87_gpio_chip.lock),
78};
79
80static inline struct it87_gpio *to_it87_gpio(struct gpio_chip *chip)
81{
82 return container_of(chip, struct it87_gpio, chip);
83}
84
85/* Superio chip access functions; copied from wdt_it87 */
86
87static inline int superio_enter(void)
88{
89 /*
90 * Try to reserve REG and REG + 1 for exclusive access.
91 */
92 if (!request_muxed_region(REG, 2, KBUILD_MODNAME))
93 return -EBUSY;
94
95 outb(0x87, REG);
96 outb(0x01, REG);
97 outb(0x55, REG);
98 outb(0x55, REG);
99 return 0;
100}
101
102static inline void superio_exit(void)
103{
104 outb(0x02, REG);
105 outb(0x02, VAL);
106 release_region(REG, 2);
107}
108
109static inline void superio_select(int ldn)
110{
111 outb(LDNREG, REG);
112 outb(ldn, VAL);
113}
114
115static inline int superio_inb(int reg)
116{
117 outb(reg, REG);
118 return inb(VAL);
119}
120
121static inline void superio_outb(int val, int reg)
122{
123 outb(reg, REG);
124 outb(val, VAL);
125}
126
127static inline int superio_inw(int reg)
128{
129 int val;
130
131 outb(reg++, REG);
132 val = inb(VAL) << 8;
133 outb(reg, REG);
134 val |= inb(VAL);
135 return val;
136}
137
138static inline void superio_outw(int val, int reg)
139{
140 outb(reg++, REG);
141 outb(val >> 8, VAL);
142 outb(reg, REG);
143 outb(val, VAL);
144}
145
146static inline void superio_set_mask(int mask, int reg)
147{
148 u8 curr_val = superio_inb(reg);
149 u8 new_val = curr_val | mask;
150
151 if (curr_val != new_val)
152 superio_outb(new_val, reg);
153}
154
155static inline void superio_clear_mask(int mask, int reg)
156{
157 u8 curr_val = superio_inb(reg);
158 u8 new_val = curr_val & ~mask;
159
160 if (curr_val != new_val)
161 superio_outb(new_val, reg);
162}
163
164static int it87_gpio_request(struct gpio_chip *chip, unsigned gpio_num)
165{
166 u8 mask, group;
167 int rc = 0;
168 struct it87_gpio *it87_gpio = to_it87_gpio(chip);
169
170 mask = 1 << (gpio_num % 8);
171 group = (gpio_num / 8);
172
173 spin_lock(&it87_gpio->lock);
174
175 rc = superio_enter();
176 if (rc)
177 goto exit;
178
179 /* not all the IT87xx chips support Simple I/O and not all of
180 * them allow all the lines to be set/unset to Simple I/O.
181 */
182 if (group < it87_gpio->simple_size)
183 superio_set_mask(mask, group + it87_gpio->simple_base);
184
185 /* clear output enable, setting the pin to input, as all the
186 * newly-exported GPIO interfaces are set to input.
187 */
188 superio_clear_mask(mask, group + it87_gpio->output_base);
189
190 superio_exit();
191
192exit:
193 spin_unlock(&it87_gpio->lock);
194 return rc;
195}
196
197static int it87_gpio_get(struct gpio_chip *chip, unsigned gpio_num)
198{
199 u16 reg;
200 u8 mask;
201 struct it87_gpio *it87_gpio = to_it87_gpio(chip);
202
203 mask = 1 << (gpio_num % 8);
204 reg = (gpio_num / 8) + it87_gpio->io_base;
205
206 return !!(inb(reg) & mask);
207}
208
209static int it87_gpio_direction_in(struct gpio_chip *chip, unsigned gpio_num)
210{
211 u8 mask, group;
212 int rc = 0;
213 struct it87_gpio *it87_gpio = to_it87_gpio(chip);
214
215 mask = 1 << (gpio_num % 8);
216 group = (gpio_num / 8);
217
218 spin_lock(&it87_gpio->lock);
219
220 rc = superio_enter();
221 if (rc)
222 goto exit;
223
224 /* clear the output enable bit */
225 superio_clear_mask(mask, group + it87_gpio->output_base);
226
227 superio_exit();
228
229exit:
230 spin_unlock(&it87_gpio->lock);
231 return rc;
232}
233
234static void it87_gpio_set(struct gpio_chip *chip,
235 unsigned gpio_num, int val)
236{
237 u8 mask, curr_vals;
238 u16 reg;
239 struct it87_gpio *it87_gpio = to_it87_gpio(chip);
240
241 mask = 1 << (gpio_num % 8);
242 reg = (gpio_num / 8) + it87_gpio->io_base;
243
244 curr_vals = inb(reg);
245 if (val)
246 outb(curr_vals | mask, reg);
247 else
248 outb(curr_vals & ~mask, reg);
249}
250
251static int it87_gpio_direction_out(struct gpio_chip *chip,
252 unsigned gpio_num, int val)
253{
254 u8 mask, group;
255 int rc = 0;
256 struct it87_gpio *it87_gpio = to_it87_gpio(chip);
257
258 mask = 1 << (gpio_num % 8);
259 group = (gpio_num / 8);
260
261 spin_lock(&it87_gpio->lock);
262
263 rc = superio_enter();
264 if (rc)
265 goto exit;
266
267 /* set the output enable bit */
268 superio_set_mask(mask, group + it87_gpio->output_base);
269
270 it87_gpio_set(chip, gpio_num, val);
271
272 superio_exit();
273
274exit:
275 spin_unlock(&it87_gpio->lock);
276 return rc;
277}
278
279static struct gpio_chip it87_template_chip = {
280 .label = KBUILD_MODNAME,
281 .owner = THIS_MODULE,
282 .request = it87_gpio_request,
283 .get = it87_gpio_get,
284 .direction_input = it87_gpio_direction_in,
285 .set = it87_gpio_set,
286 .direction_output = it87_gpio_direction_out,
287 .base = -1
288};
289
290static int __init it87_gpio_init(void)
291{
292 int rc = 0, i;
293 u16 chip_type;
294 u8 chip_rev, gpio_ba_reg;
295 char *labels, **labels_table;
296
297 struct it87_gpio *it87_gpio = &it87_gpio_chip;
298
299 rc = superio_enter();
300 if (rc)
301 return rc;
302
303 chip_type = superio_inw(CHIPID);
304 chip_rev = superio_inb(CHIPREV) & 0x0f;
305 superio_exit();
306
307 it87_gpio->chip = it87_template_chip;
308
309 switch (chip_type) {
310 case IT8728_ID:
311 case IT8732_ID:
312 gpio_ba_reg = 0x62;
313 it87_gpio->io_size = 8;
314 it87_gpio->output_base = 0xc8;
315 it87_gpio->simple_base = 0xc0;
316 it87_gpio->simple_size = 5;
317 it87_gpio->chip.ngpio = 64;
318 break;
319 case IT8761_ID:
320 gpio_ba_reg = 0x60;
321 it87_gpio->io_size = 4;
322 it87_gpio->output_base = 0xf0;
323 it87_gpio->simple_size = 0;
324 it87_gpio->chip.ngpio = 16;
325 break;
326 case NO_DEV_ID:
327 pr_err("no device\n");
328 return -ENODEV;
329 default:
330 pr_err("Unknown Chip found, Chip %04x Revision %x\n",
331 chip_type, chip_rev);
332 return -ENODEV;
333 }
334
335 rc = superio_enter();
336 if (rc)
337 return rc;
338
339 superio_select(GPIO);
340
341 /* fetch GPIO base address */
342 it87_gpio->io_base = superio_inw(gpio_ba_reg);
343
344 superio_exit();
345
346 pr_info("Found Chip IT%04x rev %x. %u GPIO lines starting at %04xh\n",
347 chip_type, chip_rev, it87_gpio->chip.ngpio,
348 it87_gpio->io_base);
349
350 if (!request_region(it87_gpio->io_base, it87_gpio->io_size,
351 KBUILD_MODNAME))
352 return -EBUSY;
353
354 /* Set up aliases for the GPIO connection.
355 *
356 * ITE documentation for recent chips such as the IT8728F
357 * refers to the GPIO lines as GPxy, with a coordinates system
358 * where x is the GPIO group (starting from 1) and y is the
359 * bit within the group.
360 *
361 * By creating these aliases, we make it easier to understand
362 * which GPIO pin we're referring to.
363 */
364 labels = kcalloc(it87_gpio->chip.ngpio, sizeof("it87_gpXY"),
365 GFP_KERNEL);
366 labels_table = kcalloc(it87_gpio->chip.ngpio, sizeof(const char *),
367 GFP_KERNEL);
368
369 if (!labels || !labels_table) {
370 rc = -ENOMEM;
371 goto labels_free;
372 }
373
374 for (i = 0; i < it87_gpio->chip.ngpio; i++) {
375 char *label = &labels[i * sizeof("it87_gpXY")];
376
377 sprintf(label, "it87_gp%u%u", 1+(i/8), i%8);
378 labels_table[i] = label;
379 }
380
381 it87_gpio->chip.names = (const char *const*)labels_table;
382
383 rc = gpiochip_add(&it87_gpio->chip);
384 if (rc)
385 goto labels_free;
386
387 return 0;
388
389labels_free:
390 kfree(labels_table);
391 kfree(labels);
392 release_region(it87_gpio->io_base, it87_gpio->io_size);
393 return rc;
394}
395
396static void __exit it87_gpio_exit(void)
397{
398 struct it87_gpio *it87_gpio = &it87_gpio_chip;
399
400 gpiochip_remove(&it87_gpio->chip);
401 release_region(it87_gpio->io_base, it87_gpio->io_size);
402 kfree(it87_gpio->chip.names[0]);
403 kfree(it87_gpio->chip.names);
404}
405
406module_init(it87_gpio_init);
407module_exit(it87_gpio_exit);
408
409MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
410MODULE_DESCRIPTION("GPIO interface for IT87xx Super I/O chips");
411MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-it8761e.c b/drivers/gpio/gpio-it8761e.c
deleted file mode 100644
index 30a8f24c92c5..000000000000
--- a/drivers/gpio/gpio-it8761e.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/*
2 * GPIO interface for IT8761E Super I/O chip
3 *
4 * Author: Denis Turischev <denis@compulab.co.il>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/io.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26
27#include <linux/gpio.h>
28
29#define SIO_CHIP_ID 0x8761
30#define CHIP_ID_HIGH_BYTE 0x20
31#define CHIP_ID_LOW_BYTE 0x21
32
33static u8 ports[2] = { 0x2e, 0x4e };
34static u8 port;
35
36static DEFINE_SPINLOCK(sio_lock);
37
38#define GPIO_NAME "it8761-gpio"
39#define GPIO_BA_HIGH_BYTE 0x60
40#define GPIO_BA_LOW_BYTE 0x61
41#define GPIO_IOSIZE 4
42#define GPIO1X_IO 0xf0
43#define GPIO2X_IO 0xf1
44
45static u16 gpio_ba;
46
47static u8 read_reg(u8 addr, u8 port)
48{
49 outb(addr, port);
50 return inb(port + 1);
51}
52
53static void write_reg(u8 data, u8 addr, u8 port)
54{
55 outb(addr, port);
56 outb(data, port + 1);
57}
58
59static void enter_conf_mode(u8 port)
60{
61 outb(0x87, port);
62 outb(0x61, port);
63 outb(0x55, port);
64 outb((port == 0x2e) ? 0x55 : 0xaa, port);
65}
66
67static void exit_conf_mode(u8 port)
68{
69 outb(0x2, port);
70 outb(0x2, port + 1);
71}
72
73static void enter_gpio_mode(u8 port)
74{
75 write_reg(0x2, 0x7, port);
76}
77
78static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
79{
80 u16 reg;
81 u8 bit;
82
83 bit = gpio_num % 8;
84 reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
85
86 return !!(inb(reg) & (1 << bit));
87}
88
89static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
90{
91 u8 curr_dirs;
92 u8 io_reg, bit;
93
94 bit = gpio_num % 8;
95 io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
96
97 spin_lock(&sio_lock);
98
99 enter_conf_mode(port);
100 enter_gpio_mode(port);
101
102 curr_dirs = read_reg(io_reg, port);
103
104 if (curr_dirs & (1 << bit))
105 write_reg(curr_dirs & ~(1 << bit), io_reg, port);
106
107 exit_conf_mode(port);
108
109 spin_unlock(&sio_lock);
110 return 0;
111}
112
113static void it8761e_gpio_set(struct gpio_chip *gc,
114 unsigned gpio_num, int val)
115{
116 u8 curr_vals, bit;
117 u16 reg;
118
119 bit = gpio_num % 8;
120 reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
121
122 spin_lock(&sio_lock);
123
124 curr_vals = inb(reg);
125 if (val)
126 outb(curr_vals | (1 << bit), reg);
127 else
128 outb(curr_vals & ~(1 << bit), reg);
129
130 spin_unlock(&sio_lock);
131}
132
133static int it8761e_gpio_direction_out(struct gpio_chip *gc,
134 unsigned gpio_num, int val)
135{
136 u8 curr_dirs, io_reg, bit;
137
138 bit = gpio_num % 8;
139 io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
140
141 it8761e_gpio_set(gc, gpio_num, val);
142
143 spin_lock(&sio_lock);
144
145 enter_conf_mode(port);
146 enter_gpio_mode(port);
147
148 curr_dirs = read_reg(io_reg, port);
149
150 if (!(curr_dirs & (1 << bit)))
151 write_reg(curr_dirs | (1 << bit), io_reg, port);
152
153 exit_conf_mode(port);
154
155 spin_unlock(&sio_lock);
156 return 0;
157}
158
159static struct gpio_chip it8761e_gpio_chip = {
160 .label = GPIO_NAME,
161 .owner = THIS_MODULE,
162 .get = it8761e_gpio_get,
163 .direction_input = it8761e_gpio_direction_in,
164 .set = it8761e_gpio_set,
165 .direction_output = it8761e_gpio_direction_out,
166};
167
168static int __init it8761e_gpio_init(void)
169{
170 int i, id, err;
171
172 /* chip and port detection */
173 for (i = 0; i < ARRAY_SIZE(ports); i++) {
174 spin_lock(&sio_lock);
175 enter_conf_mode(ports[i]);
176
177 id = (read_reg(CHIP_ID_HIGH_BYTE, ports[i]) << 8) +
178 read_reg(CHIP_ID_LOW_BYTE, ports[i]);
179
180 exit_conf_mode(ports[i]);
181 spin_unlock(&sio_lock);
182
183 if (id == SIO_CHIP_ID) {
184 port = ports[i];
185 break;
186 }
187 }
188
189 if (!port)
190 return -ENODEV;
191
192 /* fetch GPIO base address */
193 enter_conf_mode(port);
194 enter_gpio_mode(port);
195 gpio_ba = (read_reg(GPIO_BA_HIGH_BYTE, port) << 8) +
196 read_reg(GPIO_BA_LOW_BYTE, port);
197 exit_conf_mode(port);
198
199 if (!request_region(gpio_ba, GPIO_IOSIZE, GPIO_NAME))
200 return -EBUSY;
201
202 it8761e_gpio_chip.base = -1;
203 it8761e_gpio_chip.ngpio = 16;
204
205 err = gpiochip_add(&it8761e_gpio_chip);
206 if (err < 0)
207 goto gpiochip_add_err;
208
209 return 0;
210
211gpiochip_add_err:
212 release_region(gpio_ba, GPIO_IOSIZE);
213 gpio_ba = 0;
214 return err;
215}
216
217static void __exit it8761e_gpio_exit(void)
218{
219 if (gpio_ba) {
220 gpiochip_remove(&it8761e_gpio_chip);
221 release_region(gpio_ba, GPIO_IOSIZE);
222 gpio_ba = 0;
223 }
224}
225module_init(it8761e_gpio_init);
226module_exit(it8761e_gpio_exit);
227
228MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
229MODULE_DESCRIPTION("GPIO interface for IT8761E Super I/O chip");
230MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index eb68603136b0..e39dcb0af8ae 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -36,16 +36,6 @@ static inline struct lpc18xx_gpio_chip *to_lpc18xx_gpio(struct gpio_chip *chip)
36 return container_of(chip, struct lpc18xx_gpio_chip, gpio); 36 return container_of(chip, struct lpc18xx_gpio_chip, gpio);
37} 37}
38 38
39static int lpc18xx_gpio_request(struct gpio_chip *chip, unsigned offset)
40{
41 return pinctrl_request_gpio(offset);
42}
43
44static void lpc18xx_gpio_free(struct gpio_chip *chip, unsigned offset)
45{
46 pinctrl_free_gpio(offset);
47}
48
49static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 39static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
50{ 40{
51 struct lpc18xx_gpio_chip *gc = to_lpc18xx_gpio(chip); 41 struct lpc18xx_gpio_chip *gc = to_lpc18xx_gpio(chip);
@@ -95,8 +85,8 @@ static int lpc18xx_gpio_direction_output(struct gpio_chip *chip,
95 85
96static struct gpio_chip lpc18xx_chip = { 86static struct gpio_chip lpc18xx_chip = {
97 .label = "lpc18xx/43xx-gpio", 87 .label = "lpc18xx/43xx-gpio",
98 .request = lpc18xx_gpio_request, 88 .request = gpiochip_generic_request,
99 .free = lpc18xx_gpio_free, 89 .free = gpiochip_generic_free,
100 .direction_input = lpc18xx_gpio_direction_input, 90 .direction_input = lpc18xx_gpio_direction_input,
101 .direction_output = lpc18xx_gpio_direction_output, 91 .direction_output = lpc18xx_gpio_direction_output,
102 .set = lpc18xx_gpio_set, 92 .set = lpc18xx_gpio_set,
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 18ab89e20806..0f57d2d248ec 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -236,7 +236,6 @@ int __max730x_remove(struct device *dev)
236 ts->write(dev, 0x04, 0x00); 236 ts->write(dev, 0x04, 0x00);
237 gpiochip_remove(&ts->chip); 237 gpiochip_remove(&ts->chip);
238 mutex_destroy(&ts->lock); 238 mutex_destroy(&ts->lock);
239 kfree(ts);
240 return 0; 239 return 0;
241} 240}
242EXPORT_SYMBOL_GPL(__max730x_remove); 241EXPORT_SYMBOL_GPL(__max730x_remove);
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index abd8676ce2b6..d3355a6dc9b1 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -29,16 +29,6 @@
29#define GPIO_DATA_IN 0x04 29#define GPIO_DATA_IN 0x04
30#define GPIO_PIN_DIRECTION 0x08 30#define GPIO_PIN_DIRECTION 0x08
31 31
32static int moxart_gpio_request(struct gpio_chip *chip, unsigned offset)
33{
34 return pinctrl_request_gpio(offset);
35}
36
37static void moxart_gpio_free(struct gpio_chip *chip, unsigned offset)
38{
39 pinctrl_free_gpio(offset);
40}
41
42static int moxart_gpio_probe(struct platform_device *pdev) 32static int moxart_gpio_probe(struct platform_device *pdev)
43{ 33{
44 struct device *dev = &pdev->dev; 34 struct device *dev = &pdev->dev;
@@ -66,8 +56,8 @@ static int moxart_gpio_probe(struct platform_device *pdev)
66 } 56 }
67 57
68 bgc->gc.label = "moxart-gpio"; 58 bgc->gc.label = "moxart-gpio";
69 bgc->gc.request = moxart_gpio_request; 59 bgc->gc.request = gpiochip_generic_request;
70 bgc->gc.free = moxart_gpio_free; 60 bgc->gc.free = gpiochip_generic_free;
71 bgc->data = bgc->read_reg(bgc->reg_set); 61 bgc->data = bgc->read_reg(bgc->reg_set);
72 bgc->gc.base = 0; 62 bgc->gc.base = 0;
73 bgc->gc.ngpio = 32; 63 bgc->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
deleted file mode 100644
index 4b4222145f10..000000000000
--- a/drivers/gpio/gpio-msm-v2.c
+++ /dev/null
@@ -1,453 +0,0 @@
1/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 *
17 */
18#define pr_fmt(fmt) "%s: " fmt, __func__
19
20#include <linux/bitmap.h>
21#include <linux/bitops.h>
22#include <linux/err.h>
23#include <linux/gpio.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/irqchip/chained_irq.h>
28#include <linux/irq.h>
29#include <linux/irqdomain.h>
30#include <linux/module.h>
31#include <linux/of_address.h>
32#include <linux/platform_device.h>
33#include <linux/spinlock.h>
34#include <linux/slab.h>
35
36#define MAX_NR_GPIO 300
37
38/* Bits of interest in the GPIO_IN_OUT register.
39 */
40enum {
41 GPIO_IN = 0,
42 GPIO_OUT = 1
43};
44
45/* Bits of interest in the GPIO_INTR_STATUS register.
46 */
47enum {
48 INTR_STATUS = 0,
49};
50
51/* Bits of interest in the GPIO_CFG register.
52 */
53enum {
54 GPIO_OE = 9,
55};
56
57/* Bits of interest in the GPIO_INTR_CFG register.
58 * When a GPIO triggers, two separate decisions are made, controlled
59 * by two separate flags.
60 *
61 * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS
62 * register for that GPIO will be updated to reflect the triggering of that
63 * gpio. If this bit is 0, this register will not be updated.
64 * - Second, INTR_ENABLE controls whether an interrupt is triggered.
65 *
66 * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt
67 * can be triggered but the status register will not reflect it.
68 */
69enum {
70 INTR_ENABLE = 0,
71 INTR_POL_CTL = 1,
72 INTR_DECT_CTL = 2,
73 INTR_RAW_STATUS_EN = 3,
74};
75
76/* Codes of interest in GPIO_INTR_CFG_SU.
77 */
78enum {
79 TARGET_PROC_SCORPION = 4,
80 TARGET_PROC_NONE = 7,
81};
82
83/**
84 * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure
85 *
86 * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By
87 * keeping track of which gpios are unmasked as irq sources, we avoid
88 * having to do readl calls on hundreds of iomapped registers each time
89 * the summary interrupt fires in order to locate the active interrupts.
90 *
91 * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
92 * as wakeup sources. When the device is suspended, interrupts which are
93 * not wakeup sources are disabled.
94 *
95 * @dual_edge_irqs: a bitmap used to track which irqs are configured
96 * as dual-edge, as this is not supported by the hardware and requires
97 * some special handling in the driver.
98 */
99struct msm_gpio_dev {
100 struct gpio_chip gpio_chip;
101 DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
102 DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
103 DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
104 struct irq_domain *domain;
105 int summary_irq;
106 void __iomem *msm_tlmm_base;
107};
108
109static struct msm_gpio_dev msm_gpio;
110
111#define GPIO_INTR_CFG_SU(gpio) (msm_gpio.msm_tlmm_base + 0x0400 + \
112 (0x04 * (gpio)))
113#define GPIO_CONFIG(gpio) (msm_gpio.msm_tlmm_base + 0x1000 + \
114 (0x10 * (gpio)))
115#define GPIO_IN_OUT(gpio) (msm_gpio.msm_tlmm_base + 0x1004 + \
116 (0x10 * (gpio)))
117#define GPIO_INTR_CFG(gpio) (msm_gpio.msm_tlmm_base + 0x1008 + \
118 (0x10 * (gpio)))
119#define GPIO_INTR_STATUS(gpio) (msm_gpio.msm_tlmm_base + 0x100c + \
120 (0x10 * (gpio)))
121
122static DEFINE_SPINLOCK(tlmm_lock);
123
124static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
125{
126 return container_of(chip, struct msm_gpio_dev, gpio_chip);
127}
128
129static inline void set_gpio_bits(unsigned n, void __iomem *reg)
130{
131 writel(readl(reg) | n, reg);
132}
133
134static inline void clear_gpio_bits(unsigned n, void __iomem *reg)
135{
136 writel(readl(reg) & ~n, reg);
137}
138
139static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
140{
141 return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN);
142}
143
144static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
145{
146 writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset));
147}
148
149static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
150{
151 unsigned long irq_flags;
152
153 spin_lock_irqsave(&tlmm_lock, irq_flags);
154 clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
155 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
156 return 0;
157}
158
159static int msm_gpio_direction_output(struct gpio_chip *chip,
160 unsigned offset,
161 int val)
162{
163 unsigned long irq_flags;
164
165 spin_lock_irqsave(&tlmm_lock, irq_flags);
166 msm_gpio_set(chip, offset, val);
167 set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
168 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
169 return 0;
170}
171
172static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
173{
174 return 0;
175}
176
177static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
178{
179 return;
180}
181
182static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
183{
184 struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
185 struct irq_domain *domain = g_dev->domain;
186
187 return irq_create_mapping(domain, offset);
188}
189
190/* For dual-edge interrupts in software, since the hardware has no
191 * such support:
192 *
193 * At appropriate moments, this function may be called to flip the polarity
194 * settings of both-edge irq lines to try and catch the next edge.
195 *
196 * The attempt is considered successful if:
197 * - the status bit goes high, indicating that an edge was caught, or
198 * - the input value of the gpio doesn't change during the attempt.
199 * If the value changes twice during the process, that would cause the first
200 * test to fail but would force the second, as two opposite
201 * transitions would cause a detection no matter the polarity setting.
202 *
203 * The do-loop tries to sledge-hammer closed the timing hole between
204 * the initial value-read and the polarity-write - if the line value changes
205 * during that window, an interrupt is lost, the new polarity setting is
206 * incorrect, and the first success test will fail, causing a retry.
207 *
208 * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
209 */
210static void msm_gpio_update_dual_edge_pos(unsigned gpio)
211{
212 int loop_limit = 100;
213 unsigned val, val2, intstat;
214
215 do {
216 val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
217 if (val)
218 clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
219 else
220 set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
221 val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
222 intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS);
223 if (intstat || val == val2)
224 return;
225 } while (loop_limit-- > 0);
226 pr_err("%s: dual-edge irq failed to stabilize, "
227 "interrupts dropped. %#08x != %#08x\n",
228 __func__, val, val2);
229}
230
231static void msm_gpio_irq_ack(struct irq_data *d)
232{
233 int gpio = d->hwirq;
234
235 writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
236 if (test_bit(gpio, msm_gpio.dual_edge_irqs))
237 msm_gpio_update_dual_edge_pos(gpio);
238}
239
240static void msm_gpio_irq_mask(struct irq_data *d)
241{
242 unsigned long irq_flags;
243 int gpio = d->hwirq;
244
245 spin_lock_irqsave(&tlmm_lock, irq_flags);
246 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
247 clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
248 __clear_bit(gpio, msm_gpio.enabled_irqs);
249 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
250}
251
252static void msm_gpio_irq_unmask(struct irq_data *d)
253{
254 unsigned long irq_flags;
255 int gpio = d->hwirq;
256
257 spin_lock_irqsave(&tlmm_lock, irq_flags);
258 __set_bit(gpio, msm_gpio.enabled_irqs);
259 set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
260 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
261 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
262}
263
264static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
265{
266 unsigned long irq_flags;
267 int gpio = d->hwirq;
268 uint32_t bits;
269
270 spin_lock_irqsave(&tlmm_lock, irq_flags);
271
272 bits = readl(GPIO_INTR_CFG(gpio));
273
274 if (flow_type & IRQ_TYPE_EDGE_BOTH) {
275 bits |= BIT(INTR_DECT_CTL);
276 irq_set_handler_locked(d, handle_edge_irq);
277 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
278 __set_bit(gpio, msm_gpio.dual_edge_irqs);
279 else
280 __clear_bit(gpio, msm_gpio.dual_edge_irqs);
281 } else {
282 bits &= ~BIT(INTR_DECT_CTL);
283 irq_set_handler_locked(d, handle_level_irq);
284 __clear_bit(gpio, msm_gpio.dual_edge_irqs);
285 }
286
287 if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
288 bits |= BIT(INTR_POL_CTL);
289 else
290 bits &= ~BIT(INTR_POL_CTL);
291
292 writel(bits, GPIO_INTR_CFG(gpio));
293
294 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
295 msm_gpio_update_dual_edge_pos(gpio);
296
297 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
298
299 return 0;
300}
301
302/*
303 * When the summary IRQ is raised, any number of GPIO lines may be high.
304 * It is the job of the summary handler to find all those GPIO lines
305 * which have been set as summary IRQ lines and which are triggered,
306 * and to call their interrupt handlers.
307 */
308static void msm_summary_irq_handler(struct irq_desc *desc)
309{
310 unsigned long i;
311 struct irq_chip *chip = irq_desc_get_chip(desc);
312
313 chained_irq_enter(chip, desc);
314
315 for_each_set_bit(i, msm_gpio.enabled_irqs, MAX_NR_GPIO) {
316 if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS))
317 generic_handle_irq(irq_find_mapping(msm_gpio.domain,
318 i));
319 }
320
321 chained_irq_exit(chip, desc);
322}
323
324static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
325{
326 int gpio = d->hwirq;
327
328 if (on) {
329 if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
330 irq_set_irq_wake(msm_gpio.summary_irq, 1);
331 set_bit(gpio, msm_gpio.wake_irqs);
332 } else {
333 clear_bit(gpio, msm_gpio.wake_irqs);
334 if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
335 irq_set_irq_wake(msm_gpio.summary_irq, 0);
336 }
337
338 return 0;
339}
340
341static struct irq_chip msm_gpio_irq_chip = {
342 .name = "msmgpio",
343 .irq_mask = msm_gpio_irq_mask,
344 .irq_unmask = msm_gpio_irq_unmask,
345 .irq_ack = msm_gpio_irq_ack,
346 .irq_set_type = msm_gpio_irq_set_type,
347 .irq_set_wake = msm_gpio_irq_set_wake,
348};
349
350static struct lock_class_key msm_gpio_lock_class;
351
352static int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
353 irq_hw_number_t hwirq)
354{
355 irq_set_lockdep_class(irq, &msm_gpio_lock_class);
356 irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
357 handle_level_irq);
358
359 return 0;
360}
361
362static const struct irq_domain_ops msm_gpio_irq_domain_ops = {
363 .xlate = irq_domain_xlate_twocell,
364 .map = msm_gpio_irq_domain_map,
365};
366
367static int msm_gpio_probe(struct platform_device *pdev)
368{
369 int ret, ngpio;
370 struct resource *res;
371
372 if (of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) {
373 dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__);
374 return -EINVAL;
375 }
376
377 if (ngpio > MAX_NR_GPIO)
378 WARN(1, "ngpio exceeds the MAX_NR_GPIO. Increase MAX_NR_GPIO\n");
379
380 bitmap_zero(msm_gpio.enabled_irqs, MAX_NR_GPIO);
381 bitmap_zero(msm_gpio.wake_irqs, MAX_NR_GPIO);
382 bitmap_zero(msm_gpio.dual_edge_irqs, MAX_NR_GPIO);
383
384 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
385 msm_gpio.msm_tlmm_base = devm_ioremap_resource(&pdev->dev, res);
386 if (IS_ERR(msm_gpio.msm_tlmm_base))
387 return PTR_ERR(msm_gpio.msm_tlmm_base);
388
389 msm_gpio.gpio_chip.ngpio = ngpio;
390 msm_gpio.gpio_chip.label = pdev->name;
391 msm_gpio.gpio_chip.dev = &pdev->dev;
392 msm_gpio.gpio_chip.base = 0;
393 msm_gpio.gpio_chip.direction_input = msm_gpio_direction_input;
394 msm_gpio.gpio_chip.direction_output = msm_gpio_direction_output;
395 msm_gpio.gpio_chip.get = msm_gpio_get;
396 msm_gpio.gpio_chip.set = msm_gpio_set;
397 msm_gpio.gpio_chip.to_irq = msm_gpio_to_irq;
398 msm_gpio.gpio_chip.request = msm_gpio_request;
399 msm_gpio.gpio_chip.free = msm_gpio_free;
400
401 ret = gpiochip_add(&msm_gpio.gpio_chip);
402 if (ret < 0) {
403 dev_err(&pdev->dev, "gpiochip_add failed with error %d\n", ret);
404 return ret;
405 }
406
407 msm_gpio.summary_irq = platform_get_irq(pdev, 0);
408 if (msm_gpio.summary_irq < 0) {
409 dev_err(&pdev->dev, "No Summary irq defined for msmgpio\n");
410 return msm_gpio.summary_irq;
411 }
412
413 msm_gpio.domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
414 &msm_gpio_irq_domain_ops,
415 &msm_gpio);
416 if (!msm_gpio.domain)
417 return -ENODEV;
418
419 irq_set_chained_handler(msm_gpio.summary_irq, msm_summary_irq_handler);
420
421 return 0;
422}
423
424static const struct of_device_id msm_gpio_of_match[] = {
425 { .compatible = "qcom,msm-gpio", },
426 { },
427};
428MODULE_DEVICE_TABLE(of, msm_gpio_of_match);
429
430static int msm_gpio_remove(struct platform_device *dev)
431{
432 gpiochip_remove(&msm_gpio.gpio_chip);
433
434 irq_set_handler(msm_gpio.summary_irq, NULL);
435
436 return 0;
437}
438
439static struct platform_driver msm_gpio_driver = {
440 .probe = msm_gpio_probe,
441 .remove = msm_gpio_remove,
442 .driver = {
443 .name = "msmgpio",
444 .of_match_table = msm_gpio_of_match,
445 },
446};
447
448module_platform_driver(msm_gpio_driver)
449
450MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
451MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
452MODULE_LICENSE("GPL v2");
453MODULE_ALIAS("platform:msmgpio");
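
The deleted gpio-msm-v2 driver (apparently superseded by the pinctrl-based TLMM drivers for these SoCs) is mostly self-explanatory, but the dual-edge emulation it documents around msm_gpio_update_dual_edge_pos() is worth noting, since the hardware cannot latch both edges. A generic, hedged sketch of that technique follows; read_line(), set_polarity() and irq_latched() are hypothetical accessors standing in for the TLMM register helpers:

#include <linux/printk.h>

/* Hedged sketch of the software dual-edge emulation described above:
 * after each event, flip the polarity to arm for the opposite edge,
 * then re-read the line to catch the race where the input toggled
 * while the polarity was being rewritten.
 */
bool read_line(unsigned gpio);			/* hypothetical */
void set_polarity(unsigned gpio, bool detect_high);	/* hypothetical */
bool irq_latched(unsigned gpio);		/* hypothetical */

static void dual_edge_rearm(unsigned gpio)
{
	int loop_limit = 100;
	bool val, val2;

	do {
		val = read_line(gpio);
		set_polarity(gpio, !val);	/* arm for the opposite level */
		val2 = read_line(gpio);
		if (irq_latched(gpio) || val == val2)
			return;		/* edge caught, or line was stable */
	} while (--loop_limit);
	pr_err("dual-edge emulation failed to stabilize on gpio %u\n", gpio);
}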
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index df418b81456d..d428b97876c5 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -185,16 +185,6 @@ static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
185 * Functions implementing the gpio_chip methods 185 * Functions implementing the gpio_chip methods
186 */ 186 */
187 187
188static int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
189{
190 return pinctrl_request_gpio(chip->base + pin);
191}
192
193static void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
194{
195 pinctrl_free_gpio(chip->base + pin);
196}
197
198static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value) 188static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
199{ 189{
200 struct mvebu_gpio_chip *mvchip = 190 struct mvebu_gpio_chip *mvchip =
@@ -709,8 +699,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
709 mvchip->soc_variant = soc_variant; 699 mvchip->soc_variant = soc_variant;
710 mvchip->chip.label = dev_name(&pdev->dev); 700 mvchip->chip.label = dev_name(&pdev->dev);
711 mvchip->chip.dev = &pdev->dev; 701 mvchip->chip.dev = &pdev->dev;
712 mvchip->chip.request = mvebu_gpio_request; 702 mvchip->chip.request = gpiochip_generic_request;
713 mvchip->chip.free = mvebu_gpio_free; 703 mvchip->chip.free = gpiochip_generic_free;
714 mvchip->chip.direction_input = mvebu_gpio_direction_input; 704 mvchip->chip.direction_input = mvebu_gpio_direction_input;
715 mvchip->chip.get = mvebu_gpio_get; 705 mvchip->chip.get = mvebu_gpio_get;
716 mvchip->chip.direction_output = mvebu_gpio_direction_output; 706 mvchip->chip.direction_output = mvebu_gpio_direction_output;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 5236db161e76..56d2d026e62e 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -51,7 +51,7 @@ struct gpio_regs {
51struct gpio_bank { 51struct gpio_bank {
52 struct list_head node; 52 struct list_head node;
53 void __iomem *base; 53 void __iomem *base;
54 u16 irq; 54 int irq;
55 u32 non_wakeup_gpios; 55 u32 non_wakeup_gpios;
56 u32 enabled_non_wakeup_gpios; 56 u32 enabled_non_wakeup_gpios;
57 struct gpio_regs context; 57 struct gpio_regs context;
@@ -59,6 +59,7 @@ struct gpio_bank {
59 u32 level_mask; 59 u32 level_mask;
60 u32 toggle_mask; 60 u32 toggle_mask;
61 raw_spinlock_t lock; 61 raw_spinlock_t lock;
62 raw_spinlock_t wa_lock;
62 struct gpio_chip chip; 63 struct gpio_chip chip;
63 struct clk *dbck; 64 struct clk *dbck;
64 u32 mod_usage; 65 u32 mod_usage;
@@ -496,9 +497,6 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
496 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))) 497 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
497 return -EINVAL; 498 return -EINVAL;
498 499
499 if (!BANK_USED(bank))
500 pm_runtime_get_sync(bank->dev);
501
502 raw_spin_lock_irqsave(&bank->lock, flags); 500 raw_spin_lock_irqsave(&bank->lock, flags);
503 retval = omap_set_gpio_triggering(bank, offset, type); 501 retval = omap_set_gpio_triggering(bank, offset, type);
504 if (retval) { 502 if (retval) {
@@ -521,8 +519,6 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
521 return 0; 519 return 0;
522 520
523error: 521error:
524 if (!BANK_USED(bank))
525 pm_runtime_put(bank->dev);
526 return retval; 522 return retval;
527} 523}
528 524
@@ -654,8 +650,13 @@ static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
654{ 650{
655 struct gpio_bank *bank = omap_irq_data_get_bank(d); 651 struct gpio_bank *bank = omap_irq_data_get_bank(d);
656 unsigned offset = d->hwirq; 652 unsigned offset = d->hwirq;
653 int ret;
654
655 ret = omap_set_gpio_wakeup(bank, offset, enable);
656 if (!ret)
657 ret = irq_set_irq_wake(bank->irq, enable);
657 658
658 return omap_set_gpio_wakeup(bank, offset, enable); 659 return ret;
659} 660}
660 661
661static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) 662static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -709,26 +710,21 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
709 * line's interrupt handler has been run, we may miss some nested 710 * line's interrupt handler has been run, we may miss some nested
710 * interrupts. 711 * interrupts.
711 */ 712 */
712static void omap_gpio_irq_handler(struct irq_desc *desc) 713static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
713{ 714{
714 void __iomem *isr_reg = NULL; 715 void __iomem *isr_reg = NULL;
715 u32 isr; 716 u32 isr;
716 unsigned int bit; 717 unsigned int bit;
717 struct gpio_bank *bank; 718 struct gpio_bank *bank = gpiobank;
718 int unmasked = 0; 719 unsigned long wa_lock_flags;
719 struct irq_chip *irqchip = irq_desc_get_chip(desc);
720 struct gpio_chip *chip = irq_desc_get_handler_data(desc);
721 unsigned long lock_flags; 720 unsigned long lock_flags;
722 721
723 chained_irq_enter(irqchip, desc);
724
725 bank = container_of(chip, struct gpio_bank, chip);
726 isr_reg = bank->base + bank->regs->irqstatus; 722 isr_reg = bank->base + bank->regs->irqstatus;
727 pm_runtime_get_sync(bank->dev);
728
729 if (WARN_ON(!isr_reg)) 723 if (WARN_ON(!isr_reg))
730 goto exit; 724 goto exit;
731 725
726 pm_runtime_get_sync(bank->dev);
727
732 while (1) { 728 while (1) {
733 u32 isr_saved, level_mask = 0; 729 u32 isr_saved, level_mask = 0;
734 u32 enabled; 730 u32 enabled;
@@ -750,13 +746,6 @@ static void omap_gpio_irq_handler(struct irq_desc *desc)
750 746
751 raw_spin_unlock_irqrestore(&bank->lock, lock_flags); 747 raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
752 748
753 /* if there is only edge sensitive GPIO pin interrupts
754 configured, we could unmask GPIO bank interrupt immediately */
755 if (!level_mask && !unmasked) {
756 unmasked = 1;
757 chained_irq_exit(irqchip, desc);
758 }
759
760 if (!isr) 749 if (!isr)
761 break; 750 break;
762 751
@@ -777,18 +766,18 @@ static void omap_gpio_irq_handler(struct irq_desc *desc)
777 766
778 raw_spin_unlock_irqrestore(&bank->lock, lock_flags); 767 raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
779 768
769 raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
770
780 generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, 771 generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
781 bit)); 772 bit));
773
774 raw_spin_unlock_irqrestore(&bank->wa_lock,
775 wa_lock_flags);
782 } 776 }
783 } 777 }
784 /* if bank has any level sensitive GPIO pin interrupt
785 configured, we must unmask the bank interrupt only after
786 handler(s) are executed in order to avoid spurious bank
787 interrupt */
788exit: 778exit:
789 if (!unmasked)
790 chained_irq_exit(irqchip, desc);
791 pm_runtime_put(bank->dev); 779 pm_runtime_put(bank->dev);
780 return IRQ_HANDLED;
792} 781}
793 782
794static unsigned int omap_gpio_irq_startup(struct irq_data *d) 783static unsigned int omap_gpio_irq_startup(struct irq_data *d)
@@ -797,9 +786,6 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
797 unsigned long flags; 786 unsigned long flags;
798 unsigned offset = d->hwirq; 787 unsigned offset = d->hwirq;
799 788
800 if (!BANK_USED(bank))
801 pm_runtime_get_sync(bank->dev);
802
803 raw_spin_lock_irqsave(&bank->lock, flags); 789 raw_spin_lock_irqsave(&bank->lock, flags);
804 790
805 if (!LINE_USED(bank->mod_usage, offset)) 791 if (!LINE_USED(bank->mod_usage, offset))
@@ -815,8 +801,6 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
815 return 0; 801 return 0;
816err: 802err:
817 raw_spin_unlock_irqrestore(&bank->lock, flags); 803 raw_spin_unlock_irqrestore(&bank->lock, flags);
818 if (!BANK_USED(bank))
819 pm_runtime_put(bank->dev);
820 return -EINVAL; 804 return -EINVAL;
821} 805}
822 806
@@ -835,6 +819,19 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
835 omap_clear_gpio_debounce(bank, offset); 819 omap_clear_gpio_debounce(bank, offset);
836 omap_disable_gpio_module(bank, offset); 820 omap_disable_gpio_module(bank, offset);
837 raw_spin_unlock_irqrestore(&bank->lock, flags); 821 raw_spin_unlock_irqrestore(&bank->lock, flags);
822}
823
824static void omap_gpio_irq_bus_lock(struct irq_data *data)
825{
826 struct gpio_bank *bank = omap_irq_data_get_bank(data);
827
828 if (!BANK_USED(bank))
829 pm_runtime_get_sync(bank->dev);
830}
831
832static void gpio_irq_bus_sync_unlock(struct irq_data *data)
833{
834 struct gpio_bank *bank = omap_irq_data_get_bank(data);
838 835
839 /* 836 /*
840 * If this is the last IRQ to be freed in the bank, 837 * If this is the last IRQ to be freed in the bank,
@@ -1132,7 +1129,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1132 } 1129 }
1133 1130
1134 ret = gpiochip_irqchip_add(&bank->chip, irqc, 1131 ret = gpiochip_irqchip_add(&bank->chip, irqc,
1135 irq_base, omap_gpio_irq_handler, 1132 irq_base, handle_bad_irq,
1136 IRQ_TYPE_NONE); 1133 IRQ_TYPE_NONE);
1137 1134
1138 if (ret) { 1135 if (ret) {
@@ -1141,10 +1138,14 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1141 return -ENODEV; 1138 return -ENODEV;
1142 } 1139 }
1143 1140
1144 gpiochip_set_chained_irqchip(&bank->chip, irqc, 1141 gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
1145 bank->irq, omap_gpio_irq_handler);
1146 1142
1147 return 0; 1143 ret = devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
1144 0, dev_name(bank->dev), bank);
1145 if (ret)
1146 gpiochip_remove(&bank->chip);
1147
1148 return ret;
1148} 1149}
1149 1150
1150static const struct of_device_id omap_gpio_match[]; 1151static const struct of_device_id omap_gpio_match[];
@@ -1183,6 +1184,8 @@ static int omap_gpio_probe(struct platform_device *pdev)
1183 irqc->irq_unmask = omap_gpio_unmask_irq, 1184 irqc->irq_unmask = omap_gpio_unmask_irq,
1184 irqc->irq_set_type = omap_gpio_irq_type, 1185 irqc->irq_set_type = omap_gpio_irq_type,
1185 irqc->irq_set_wake = omap_gpio_wake_enable, 1186 irqc->irq_set_wake = omap_gpio_wake_enable,
1187 irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
1188 irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
1186 irqc->name = dev_name(&pdev->dev); 1189 irqc->name = dev_name(&pdev->dev);
1187 1190
1188 bank->irq = platform_get_irq(pdev, 0); 1191 bank->irq = platform_get_irq(pdev, 0);
@@ -1224,6 +1227,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
1224 bank->set_dataout = omap_set_gpio_dataout_mask; 1227 bank->set_dataout = omap_set_gpio_dataout_mask;
1225 1228
1226 raw_spin_lock_init(&bank->lock); 1229 raw_spin_lock_init(&bank->lock);
1230 raw_spin_lock_init(&bank->wa_lock);
1227 1231
1228 /* Static mapping, never released */ 1232 /* Static mapping, never released */
1229 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1233 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
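
The OMAP rework above does two related things: the banked handler becomes an ordinary IRQ requested with devm_request_irq() instead of a chained flow handler, and the bank's runtime-PM reference counting moves out of the atomic irq_startup()/irq_set_type() paths into the sleepable irq_bus_lock()/irq_bus_sync_unlock() callbacks. A condensed sketch of that bus-lock pairing, assembled from the hunks above (the sync_unlock body is partly outside the visible context, so treat this as a sketch rather than the exact upstream code):

/* Condensed sketch of the runtime-PM bus-lock pairing: grab the
 * reference in the sleepable slow path before any register access,
 * drop it once no IRQ in the bank is in use any more.
 */
static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	if (!BANK_USED(bank))
		pm_runtime_get_sync(bank->dev);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	/* if this was the last IRQ in use in the bank, let it idle again */
	if (!BANK_USED(bank))
		pm_runtime_put(bank->dev);
}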
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 50caeb1ee350..2d4892cc70fb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -21,6 +21,7 @@
21#ifdef CONFIG_OF_GPIO 21#ifdef CONFIG_OF_GPIO
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#endif 23#endif
24#include <linux/acpi.h>
24 25
25#define PCA953X_INPUT 0 26#define PCA953X_INPUT 0
26#define PCA953X_OUTPUT 1 27#define PCA953X_OUTPUT 1
@@ -42,6 +43,9 @@
42#define PCA_INT 0x0100 43#define PCA_INT 0x0100
43#define PCA953X_TYPE 0x1000 44#define PCA953X_TYPE 0x1000
44#define PCA957X_TYPE 0x2000 45#define PCA957X_TYPE 0x2000
46#define PCA_TYPE_MASK 0xF000
47
48#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
45 49
46static const struct i2c_device_id pca953x_id[] = { 50static const struct i2c_device_id pca953x_id[] = {
47 { "pca9505", 40 | PCA953X_TYPE | PCA_INT, }, 51 { "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
@@ -67,11 +71,18 @@ static const struct i2c_device_id pca953x_id[] = {
67 { "tca6408", 8 | PCA953X_TYPE | PCA_INT, }, 71 { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
68 { "tca6416", 16 | PCA953X_TYPE | PCA_INT, }, 72 { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
69 { "tca6424", 24 | PCA953X_TYPE | PCA_INT, }, 73 { "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
74 { "tca9539", 16 | PCA953X_TYPE | PCA_INT, },
70 { "xra1202", 8 | PCA953X_TYPE }, 75 { "xra1202", 8 | PCA953X_TYPE },
71 { } 76 { }
72}; 77};
73MODULE_DEVICE_TABLE(i2c, pca953x_id); 78MODULE_DEVICE_TABLE(i2c, pca953x_id);
74 79
80static const struct acpi_device_id pca953x_acpi_ids[] = {
81 { "INT3491", 16 | PCA953X_TYPE | PCA_INT, },
82 { }
83};
84MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
85
75#define MAX_BANK 5 86#define MAX_BANK 5
76#define BANK_SZ 8 87#define BANK_SZ 8
77 88
@@ -95,6 +106,7 @@ struct pca953x_chip {
95 struct gpio_chip gpio_chip; 106 struct gpio_chip gpio_chip;
96 const char *const *names; 107 const char *const *names;
97 int chip_type; 108 int chip_type;
109 unsigned long driver_data;
98}; 110};
99 111
100static inline struct pca953x_chip *to_pca(struct gpio_chip *gc) 112static inline struct pca953x_chip *to_pca(struct gpio_chip *gc)
@@ -517,14 +529,13 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
517} 529}
518 530
519static int pca953x_irq_setup(struct pca953x_chip *chip, 531static int pca953x_irq_setup(struct pca953x_chip *chip,
520 const struct i2c_device_id *id,
521 int irq_base) 532 int irq_base)
522{ 533{
523 struct i2c_client *client = chip->client; 534 struct i2c_client *client = chip->client;
524 int ret, i, offset = 0; 535 int ret, i, offset = 0;
525 536
526 if (client->irq && irq_base != -1 537 if (client->irq && irq_base != -1
527 && (id->driver_data & PCA_INT)) { 538 && (chip->driver_data & PCA_INT)) {
528 539
529 switch (chip->chip_type) { 540 switch (chip->chip_type) {
530 case PCA953X_TYPE: 541 case PCA953X_TYPE:
@@ -581,12 +592,11 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
581 592
582#else /* CONFIG_GPIO_PCA953X_IRQ */ 593#else /* CONFIG_GPIO_PCA953X_IRQ */
583static int pca953x_irq_setup(struct pca953x_chip *chip, 594static int pca953x_irq_setup(struct pca953x_chip *chip,
584 const struct i2c_device_id *id,
585 int irq_base) 595 int irq_base)
586{ 596{
587 struct i2c_client *client = chip->client; 597 struct i2c_client *client = chip->client;
588 598
589 if (irq_base != -1 && (id->driver_data & PCA_INT)) 599 if (irq_base != -1 && (chip->driver_data & PCA_INT))
590 dev_warn(&client->dev, "interrupt support not compiled in\n"); 600 dev_warn(&client->dev, "interrupt support not compiled in\n");
591 601
592 return 0; 602 return 0;
@@ -635,11 +645,15 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
635 memset(val, 0xFF, NBANK(chip)); 645 memset(val, 0xFF, NBANK(chip));
636 else 646 else
637 memset(val, 0, NBANK(chip)); 647 memset(val, 0, NBANK(chip));
638 pca953x_write_regs(chip, PCA957X_INVRT, val); 648 ret = pca953x_write_regs(chip, PCA957X_INVRT, val);
649 if (ret)
650 goto out;
639 651
640 /* To enable register 6, 7 to control pull up and pull down */ 652 /* To enable register 6, 7 to control pull up and pull down */
641 memset(val, 0x02, NBANK(chip)); 653 memset(val, 0x02, NBANK(chip));
642 pca953x_write_regs(chip, PCA957X_BKEN, val); 654 ret = pca953x_write_regs(chip, PCA957X_BKEN, val);
655 if (ret)
656 goto out;
643 657
644 return 0; 658 return 0;
645out: 659out:
@@ -673,14 +687,26 @@ static int pca953x_probe(struct i2c_client *client,
673 687
674 chip->client = client; 688 chip->client = client;
675 689
676 chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE); 690 if (id) {
691 chip->driver_data = id->driver_data;
692 } else {
693 const struct acpi_device_id *id;
694
695 id = acpi_match_device(pca953x_acpi_ids, &client->dev);
696 if (!id)
697 return -ENODEV;
698
699 chip->driver_data = id->driver_data;
700 }
701
702 chip->chip_type = PCA_CHIP_TYPE(chip->driver_data);
677 703
678 mutex_init(&chip->i2c_lock); 704 mutex_init(&chip->i2c_lock);
679 705
680 /* initialize cached registers from their original values. 706 /* initialize cached registers from their original values.
681 * we can't share this chip with another i2c master. 707 * we can't share this chip with another i2c master.
682 */ 708 */
683 pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK); 709 pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
684 710
685 if (chip->chip_type == PCA953X_TYPE) 711 if (chip->chip_type == PCA953X_TYPE)
686 ret = device_pca953x_init(chip, invert); 712 ret = device_pca953x_init(chip, invert);
@@ -693,7 +719,7 @@ static int pca953x_probe(struct i2c_client *client,
693 if (ret) 719 if (ret)
694 return ret; 720 return ret;
695 721
696 ret = pca953x_irq_setup(chip, id, irq_base); 722 ret = pca953x_irq_setup(chip, irq_base);
697 if (ret) 723 if (ret)
698 return ret; 724 return ret;
699 725
@@ -765,6 +791,7 @@ static struct i2c_driver pca953x_driver = {
765 .driver = { 791 .driver = {
766 .name = "pca953x", 792 .name = "pca953x",
767 .of_match_table = pca953x_dt_ids, 793 .of_match_table = pca953x_dt_ids,
794 .acpi_match_table = ACPI_PTR(pca953x_acpi_ids),
768 }, 795 },
769 .probe = pca953x_probe, 796 .probe = pca953x_probe,
770 .remove = pca953x_remove, 797 .remove = pca953x_remove,
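
The pca953x changes add ACPI enumeration. When an I2C device is instantiated from an ACPI table, the i2c_device_id pointer passed to probe() is NULL, so the per-variant driver data has to be recovered from the ACPI match table instead; the diff stores it in the chip structure so the IRQ setup code no longer needs the id at all. The shape of that fallback, with probe_sketch() as a hypothetical reduction of pca953x_probe():

/* Hypothetical reduction of the probe-time fallback shown above. */
static int probe_sketch(struct i2c_client *client,
			const struct i2c_device_id *i2c_id)
{
	unsigned long driver_data;

	if (i2c_id) {
		/* matched through the I2C id table */
		driver_data = i2c_id->driver_data;
	} else {
		/* matched through ACPI: recover the data from that table */
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(pca953x_acpi_ids, &client->dev);
		if (!acpi_id)
			return -ENODEV;
		driver_data = acpi_id->driver_data;
	}

	/* driver_data now carries the ngpio count, PCA_INT and type bits */
	return 0;
}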
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 229ef653e0f8..4d4b37676702 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -52,36 +52,12 @@ struct pl061_gpio {
52 52
53 void __iomem *base; 53 void __iomem *base;
54 struct gpio_chip gc; 54 struct gpio_chip gc;
55 bool uses_pinctrl;
56 55
57#ifdef CONFIG_PM 56#ifdef CONFIG_PM
58 struct pl061_context_save_regs csave_regs; 57 struct pl061_context_save_regs csave_regs;
59#endif 58#endif
60}; 59};
61 60
62static int pl061_gpio_request(struct gpio_chip *gc, unsigned offset)
63{
64 /*
65 * Map back to global GPIO space and request muxing, the direction
66 * parameter does not matter for this controller.
67 */
68 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
69 int gpio = gc->base + offset;
70
71 if (chip->uses_pinctrl)
72 return pinctrl_request_gpio(gpio);
73 return 0;
74}
75
76static void pl061_gpio_free(struct gpio_chip *gc, unsigned offset)
77{
78 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
79 int gpio = gc->base + offset;
80
81 if (chip->uses_pinctrl)
82 pinctrl_free_gpio(gpio);
83}
84
85static int pl061_direction_input(struct gpio_chip *gc, unsigned offset) 61static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
86{ 62{
87 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); 63 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
@@ -152,6 +128,17 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
152 if (offset < 0 || offset >= PL061_GPIO_NR) 128 if (offset < 0 || offset >= PL061_GPIO_NR)
153 return -EINVAL; 129 return -EINVAL;
154 130
131 if ((trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) &&
132 (trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)))
133 {
134 dev_err(gc->dev,
135 "trying to configure line %d for both level and edge "
136 "detection, choose one!\n",
137 offset);
138 return -EINVAL;
139 }
140
141
155 spin_lock_irqsave(&chip->lock, flags); 142 spin_lock_irqsave(&chip->lock, flags);
156 143
157 gpioiev = readb(chip->base + GPIOIEV); 144 gpioiev = readb(chip->base + GPIOIEV);
@@ -159,23 +146,53 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
159 gpioibe = readb(chip->base + GPIOIBE); 146 gpioibe = readb(chip->base + GPIOIBE);
160 147
161 if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { 148 if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
149 bool polarity = trigger & IRQ_TYPE_LEVEL_HIGH;
150
151 /* Disable edge detection */
152 gpioibe &= ~bit;
153 /* Enable level detection */
162 gpiois |= bit; 154 gpiois |= bit;
163 if (trigger & IRQ_TYPE_LEVEL_HIGH) 155 /* Select polarity */
156 if (polarity)
164 gpioiev |= bit; 157 gpioiev |= bit;
165 else 158 else
166 gpioiev &= ~bit; 159 gpioiev &= ~bit;
167 } else 160 irq_set_handler_locked(d, handle_level_irq);
161 dev_dbg(gc->dev, "line %d: IRQ on %s level\n",
162 offset,
163 polarity ? "HIGH" : "LOW");
164 } else if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
165 /* Disable level detection */
168 gpiois &= ~bit; 166 gpiois &= ~bit;
169 167 /* Select both edges, setting this makes GPIOEV be ignored */
170 if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
171 /* Setting this makes GPIOEV be ignored */
172 gpioibe |= bit; 168 gpioibe |= bit;
173 else { 169 irq_set_handler_locked(d, handle_edge_irq);
170 dev_dbg(gc->dev, "line %d: IRQ on both edges\n", offset);
171 } else if ((trigger & IRQ_TYPE_EDGE_RISING) ||
172 (trigger & IRQ_TYPE_EDGE_FALLING)) {
173 bool rising = trigger & IRQ_TYPE_EDGE_RISING;
174
175 /* Disable level detection */
176 gpiois &= ~bit;
177 /* Clear detection on both edges */
174 gpioibe &= ~bit; 178 gpioibe &= ~bit;
175 if (trigger & IRQ_TYPE_EDGE_RISING) 179 /* Select edge */
180 if (rising)
176 gpioiev |= bit; 181 gpioiev |= bit;
177 else if (trigger & IRQ_TYPE_EDGE_FALLING) 182 else
178 gpioiev &= ~bit; 183 gpioiev &= ~bit;
184 irq_set_handler_locked(d, handle_edge_irq);
185 dev_dbg(gc->dev, "line %d: IRQ on %s edge\n",
186 offset,
187 rising ? "RISING" : "FALLING");
188 } else {
189 /* No trigger: disable everything */
190 gpiois &= ~bit;
191 gpioibe &= ~bit;
192 gpioiev &= ~bit;
193 irq_set_handler_locked(d, handle_bad_irq);
194 dev_warn(gc->dev, "no trigger selected for line %d\n",
195 offset);
179 } 196 }
180 197
181 writeb(gpiois, chip->base + GPIOIS); 198 writeb(gpiois, chip->base + GPIOIS);
@@ -198,7 +215,6 @@ static void pl061_irq_handler(struct irq_desc *desc)
198 chained_irq_enter(irqchip, desc); 215 chained_irq_enter(irqchip, desc);
199 216
200 pending = readb(chip->base + GPIOMIS); 217 pending = readb(chip->base + GPIOMIS);
201 writeb(pending, chip->base + GPIOIC);
202 if (pending) { 218 if (pending) {
203 for_each_set_bit(offset, &pending, PL061_GPIO_NR) 219 for_each_set_bit(offset, &pending, PL061_GPIO_NR)
204 generic_handle_irq(irq_find_mapping(gc->irqdomain, 220 generic_handle_irq(irq_find_mapping(gc->irqdomain,
@@ -234,8 +250,28 @@ static void pl061_irq_unmask(struct irq_data *d)
234 spin_unlock(&chip->lock); 250 spin_unlock(&chip->lock);
235} 251}
236 252
253/**
254 * pl061_irq_ack() - ACK an edge IRQ
255 * @d: IRQ data for this IRQ
256 *
257 * This gets called from the edge IRQ handler to ACK the edge IRQ
258 * in the GPIOIC (interrupt-clear) register. For level IRQs this is
259 * not needed: these go away when the level signal goes away.
260 */
261static void pl061_irq_ack(struct irq_data *d)
262{
263 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
264 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
265 u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
266
267 spin_lock(&chip->lock);
268 writeb(mask, chip->base + GPIOIC);
269 spin_unlock(&chip->lock);
270}
271
237static struct irq_chip pl061_irqchip = { 272static struct irq_chip pl061_irqchip = {
238 .name = "pl061", 273 .name = "pl061",
274 .irq_ack = pl061_irq_ack,
239 .irq_mask = pl061_irq_mask, 275 .irq_mask = pl061_irq_mask,
240 .irq_unmask = pl061_irq_unmask, 276 .irq_unmask = pl061_irq_unmask,
241 .irq_set_type = pl061_irq_type, 277 .irq_set_type = pl061_irq_type,
@@ -269,11 +305,11 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
269 return PTR_ERR(chip->base); 305 return PTR_ERR(chip->base);
270 306
271 spin_lock_init(&chip->lock); 307 spin_lock_init(&chip->lock);
272 if (of_property_read_bool(dev->of_node, "gpio-ranges")) 308 if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
273 chip->uses_pinctrl = true; 309 chip->gc.request = gpiochip_generic_request;
310 chip->gc.free = gpiochip_generic_free;
311 }
274 312
275 chip->gc.request = pl061_gpio_request;
276 chip->gc.free = pl061_gpio_free;
277 chip->gc.direction_input = pl061_direction_input; 313 chip->gc.direction_input = pl061_direction_input;
278 chip->gc.direction_output = pl061_direction_output; 314 chip->gc.direction_output = pl061_direction_output;
279 chip->gc.get = pl061_get_value; 315 chip->gc.get = pl061_get_value;
@@ -298,7 +334,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
298 } 334 }
299 335
300 ret = gpiochip_irqchip_add(&chip->gc, &pl061_irqchip, 336 ret = gpiochip_irqchip_add(&chip->gc, &pl061_irqchip,
301 irq_base, handle_simple_irq, 337 irq_base, handle_bad_irq,
302 IRQ_TYPE_NONE); 338 IRQ_TYPE_NONE);
303 if (ret) { 339 if (ret) {
304 dev_info(&adev->dev, "could not add irqchip\n"); 340 dev_info(&adev->dev, "could not add irqchip\n");
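
The pl061 rework registers the irqchip with handle_bad_irq as the default flow handler and only installs handle_level_irq or handle_edge_irq once a trigger is configured, and it adds an .irq_ack that clears the line's GPIOIC bit instead of the old blanket write in the chained handler. The general pattern, reduced to a hedged sketch (example_irq_set_type() is illustrative, not the driver's function):

/* Illustrative reduction of the 'defer the flow handler to set_type'
 * pattern: start from handle_bad_irq so an unconfigured line complains
 * loudly, then switch to the matching flow handler when a trigger is
 * programmed.  Edge triggers also need an .irq_ack writing the
 * interrupt-clear register; level triggers clear themselves when the
 * line deasserts.
 */
static int example_irq_set_type(struct irq_data *d, unsigned int trigger)
{
	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		irq_set_handler_locked(d, handle_level_irq);
	else if (trigger & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(d, handle_edge_irq);
	else
		irq_set_handler_locked(d, handle_bad_irq);

	/* ... program GPIOIS/GPIOIBE/GPIOIEV for the requested trigger ... */
	return 0;
}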
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 65bc9f47a68e..34b02b42ab9e 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -102,7 +102,7 @@ static int sdv_xlate(struct irq_domain *h, struct device_node *node,
102{ 102{
103 u32 line, type; 103 u32 line, type;
104 104
105 if (node != h->of_node) 105 if (node != irq_domain_get_of_node(h))
106 return -EINVAL; 106 return -EINVAL;
107 107
108 if (intsize < 2) 108 if (intsize < 2)
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index 9c6b96707c9f..76f920173a2f 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -160,6 +160,11 @@ static const struct of_device_id sx150x_of_match[] = {
160}; 160};
161MODULE_DEVICE_TABLE(of, sx150x_of_match); 161MODULE_DEVICE_TABLE(of, sx150x_of_match);
162 162
163struct sx150x_chip *to_sx150x(struct gpio_chip *gc)
164{
165 return container_of(gc, struct sx150x_chip, gpio_chip);
166}
167
163static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val) 168static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
164{ 169{
165 s32 err = i2c_smbus_write_byte_data(client, reg, val); 170 s32 err = i2c_smbus_write_byte_data(client, reg, val);
@@ -296,11 +301,9 @@ static int sx150x_io_output(struct sx150x_chip *chip, unsigned offset, int val)
296 301
297static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset) 302static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset)
298{ 303{
299 struct sx150x_chip *chip; 304 struct sx150x_chip *chip = to_sx150x(gc);
300 int status = -EINVAL; 305 int status = -EINVAL;
301 306
302 chip = container_of(gc, struct sx150x_chip, gpio_chip);
303
304 if (!offset_is_oscio(chip, offset)) { 307 if (!offset_is_oscio(chip, offset)) {
305 mutex_lock(&chip->lock); 308 mutex_lock(&chip->lock);
306 status = sx150x_get_io(chip, offset); 309 status = sx150x_get_io(chip, offset);
@@ -312,9 +315,7 @@ static int sx150x_gpio_get(struct gpio_chip *gc, unsigned offset)
312 315
313static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val) 316static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
314{ 317{
315 struct sx150x_chip *chip; 318 struct sx150x_chip *chip = to_sx150x(gc);
316
317 chip = container_of(gc, struct sx150x_chip, gpio_chip);
318 319
319 mutex_lock(&chip->lock); 320 mutex_lock(&chip->lock);
320 if (offset_is_oscio(chip, offset)) 321 if (offset_is_oscio(chip, offset))
@@ -326,11 +327,9 @@ static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
326 327
327static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset) 328static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
328{ 329{
329 struct sx150x_chip *chip; 330 struct sx150x_chip *chip = to_sx150x(gc);
330 int status = -EINVAL; 331 int status = -EINVAL;
331 332
332 chip = container_of(gc, struct sx150x_chip, gpio_chip);
333
334 if (!offset_is_oscio(chip, offset)) { 333 if (!offset_is_oscio(chip, offset)) {
335 mutex_lock(&chip->lock); 334 mutex_lock(&chip->lock);
336 status = sx150x_io_input(chip, offset); 335 status = sx150x_io_input(chip, offset);
@@ -343,11 +342,9 @@ static int sx150x_gpio_direction_output(struct gpio_chip *gc,
343 unsigned offset, 342 unsigned offset,
344 int val) 343 int val)
345{ 344{
346 struct sx150x_chip *chip; 345 struct sx150x_chip *chip = to_sx150x(gc);
347 int status = 0; 346 int status = 0;
348 347
349 chip = container_of(gc, struct sx150x_chip, gpio_chip);
350
351 if (!offset_is_oscio(chip, offset)) { 348 if (!offset_is_oscio(chip, offset)) {
352 mutex_lock(&chip->lock); 349 mutex_lock(&chip->lock);
353 status = sx150x_io_output(chip, offset, val); 350 status = sx150x_io_output(chip, offset, val);
@@ -358,7 +355,7 @@ static int sx150x_gpio_direction_output(struct gpio_chip *gc,
358 355
359static void sx150x_irq_mask(struct irq_data *d) 356static void sx150x_irq_mask(struct irq_data *d)
360{ 357{
361 struct sx150x_chip *chip = irq_data_get_irq_chip_data(d); 358 struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
362 unsigned n = d->hwirq; 359 unsigned n = d->hwirq;
363 360
364 chip->irq_masked |= (1 << n); 361 chip->irq_masked |= (1 << n);
@@ -367,7 +364,7 @@ static void sx150x_irq_mask(struct irq_data *d)
367 364
368static void sx150x_irq_unmask(struct irq_data *d) 365static void sx150x_irq_unmask(struct irq_data *d)
369{ 366{
370 struct sx150x_chip *chip = irq_data_get_irq_chip_data(d); 367 struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
371 unsigned n = d->hwirq; 368 unsigned n = d->hwirq;
372 369
373 chip->irq_masked &= ~(1 << n); 370 chip->irq_masked &= ~(1 << n);
@@ -376,7 +373,7 @@ static void sx150x_irq_unmask(struct irq_data *d)
376 373
377static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type) 374static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
378{ 375{
379 struct sx150x_chip *chip = irq_data_get_irq_chip_data(d); 376 struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
380 unsigned n, val = 0; 377 unsigned n, val = 0;
381 378
382 if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) 379 if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
@@ -431,14 +428,14 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
431 428
432static void sx150x_irq_bus_lock(struct irq_data *d) 429static void sx150x_irq_bus_lock(struct irq_data *d)
433{ 430{
434 struct sx150x_chip *chip = irq_data_get_irq_chip_data(d); 431 struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
435 432
436 mutex_lock(&chip->lock); 433 mutex_lock(&chip->lock);
437} 434}
438 435
439static void sx150x_irq_bus_sync_unlock(struct irq_data *d) 436static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
440{ 437{
441 struct sx150x_chip *chip = irq_data_get_irq_chip_data(d); 438 struct sx150x_chip *chip = to_sx150x(irq_data_get_irq_chip_data(d));
442 unsigned n; 439 unsigned n;
443 440
444 if (chip->irq_update == NO_UPDATE_PENDING) 441 if (chip->irq_update == NO_UPDATE_PENDING)
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 12c99d969b98..4356e6c20fc5 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -138,16 +138,6 @@ static int tb10x_gpio_direction_out(struct gpio_chip *chip,
138 return 0; 138 return 0;
139} 139}
140 140
141static int tb10x_gpio_request(struct gpio_chip *chip, unsigned offset)
142{
143 return pinctrl_request_gpio(chip->base + offset);
144}
145
146static void tb10x_gpio_free(struct gpio_chip *chip, unsigned offset)
147{
148 pinctrl_free_gpio(chip->base + offset);
149}
150
151static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 141static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
152{ 142{
153 struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip); 143 struct tb10x_gpio *tb10x_gpio = to_tb10x_gpio(chip);
@@ -213,8 +203,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
213 tb10x_gpio->gc.get = tb10x_gpio_get; 203 tb10x_gpio->gc.get = tb10x_gpio_get;
214 tb10x_gpio->gc.direction_output = tb10x_gpio_direction_out; 204 tb10x_gpio->gc.direction_output = tb10x_gpio_direction_out;
215 tb10x_gpio->gc.set = tb10x_gpio_set; 205 tb10x_gpio->gc.set = tb10x_gpio_set;
216 tb10x_gpio->gc.request = tb10x_gpio_request; 206 tb10x_gpio->gc.request = gpiochip_generic_request;
217 tb10x_gpio->gc.free = tb10x_gpio_free; 207 tb10x_gpio->gc.free = gpiochip_generic_free;
218 tb10x_gpio->gc.base = -1; 208 tb10x_gpio->gc.base = -1;
219 tb10x_gpio->gc.ngpio = ngpio; 209 tb10x_gpio->gc.ngpio = ngpio;
220 tb10x_gpio->gc.can_sleep = false; 210 tb10x_gpio->gc.can_sleep = false;
diff --git a/drivers/gpio/gpio-tz1090-pdc.c b/drivers/gpio/gpio-tz1090-pdc.c
index ede7e403ffde..3623d009d808 100644
--- a/drivers/gpio/gpio-tz1090-pdc.c
+++ b/drivers/gpio/gpio-tz1090-pdc.c
@@ -137,16 +137,6 @@ static void tz1090_pdc_gpio_set(struct gpio_chip *chip, unsigned int offset,
137 __global_unlock2(lstat); 137 __global_unlock2(lstat);
138} 138}
139 139
140static int tz1090_pdc_gpio_request(struct gpio_chip *chip, unsigned int offset)
141{
142 return pinctrl_request_gpio(chip->base + offset);
143}
144
145static void tz1090_pdc_gpio_free(struct gpio_chip *chip, unsigned int offset)
146{
147 pinctrl_free_gpio(chip->base + offset);
148}
149
150static int tz1090_pdc_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) 140static int tz1090_pdc_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
151{ 141{
152 struct tz1090_pdc_gpio *priv = to_pdc(chip); 142 struct tz1090_pdc_gpio *priv = to_pdc(chip);
@@ -203,8 +193,8 @@ static int tz1090_pdc_gpio_probe(struct platform_device *pdev)
203 priv->chip.direction_output = tz1090_pdc_gpio_direction_output; 193 priv->chip.direction_output = tz1090_pdc_gpio_direction_output;
204 priv->chip.get = tz1090_pdc_gpio_get; 194 priv->chip.get = tz1090_pdc_gpio_get;
205 priv->chip.set = tz1090_pdc_gpio_set; 195 priv->chip.set = tz1090_pdc_gpio_set;
206 priv->chip.free = tz1090_pdc_gpio_free; 196 priv->chip.free = gpiochip_generic_free;
207 priv->chip.request = tz1090_pdc_gpio_request; 197 priv->chip.request = gpiochip_generic_request;
208 priv->chip.to_irq = tz1090_pdc_gpio_to_irq; 198 priv->chip.to_irq = tz1090_pdc_gpio_to_irq;
209 priv->chip.of_node = np; 199 priv->chip.of_node = np;
210 200
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 069f9e4b7daa..87b950cec6ec 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -62,6 +62,11 @@ struct vf610_gpio_port {
62 62
63static struct irq_chip vf610_gpio_irq_chip; 63static struct irq_chip vf610_gpio_irq_chip;
64 64
65static struct vf610_gpio_port *to_vf610_gp(struct gpio_chip *gc)
66{
67 return container_of(gc, struct vf610_gpio_port, gc);
68}
69
65static const struct of_device_id vf610_gpio_dt_ids[] = { 70static const struct of_device_id vf610_gpio_dt_ids[] = {
66 { .compatible = "fsl,vf610-gpio" }, 71 { .compatible = "fsl,vf610-gpio" },
67 { /* sentinel */ } 72 { /* sentinel */ }
@@ -77,28 +82,16 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
77 return readl_relaxed(reg); 82 return readl_relaxed(reg);
78} 83}
79 84
80static int vf610_gpio_request(struct gpio_chip *chip, unsigned offset)
81{
82 return pinctrl_request_gpio(chip->base + offset);
83}
84
85static void vf610_gpio_free(struct gpio_chip *chip, unsigned offset)
86{
87 pinctrl_free_gpio(chip->base + offset);
88}
89
90static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio) 85static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
91{ 86{
92 struct vf610_gpio_port *port = 87 struct vf610_gpio_port *port = to_vf610_gp(gc);
93 container_of(gc, struct vf610_gpio_port, gc);
94 88
95 return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR) & BIT(gpio)); 89 return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR) & BIT(gpio));
96} 90}
97 91
98static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) 92static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
99{ 93{
100 struct vf610_gpio_port *port = 94 struct vf610_gpio_port *port = to_vf610_gp(gc);
101 container_of(gc, struct vf610_gpio_port, gc);
102 unsigned long mask = BIT(gpio); 95 unsigned long mask = BIT(gpio);
103 96
104 if (val) 97 if (val)
@@ -122,7 +115,8 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
122 115
123static void vf610_gpio_irq_handler(struct irq_desc *desc) 116static void vf610_gpio_irq_handler(struct irq_desc *desc)
124{ 117{
125 struct vf610_gpio_port *port = irq_desc_get_handler_data(desc); 118 struct vf610_gpio_port *port =
119 to_vf610_gp(irq_desc_get_handler_data(desc));
126 struct irq_chip *chip = irq_desc_get_chip(desc); 120 struct irq_chip *chip = irq_desc_get_chip(desc);
127 int pin; 121 int pin;
128 unsigned long irq_isfr; 122 unsigned long irq_isfr;
@@ -142,7 +136,8 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
142 136
143static void vf610_gpio_irq_ack(struct irq_data *d) 137static void vf610_gpio_irq_ack(struct irq_data *d)
144{ 138{
145 struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d); 139 struct vf610_gpio_port *port =
140 to_vf610_gp(irq_data_get_irq_chip_data(d));
146 int gpio = d->hwirq; 141 int gpio = d->hwirq;
147 142
148 vf610_gpio_writel(BIT(gpio), port->base + PORT_ISFR); 143 vf610_gpio_writel(BIT(gpio), port->base + PORT_ISFR);
@@ -150,7 +145,8 @@ static void vf610_gpio_irq_ack(struct irq_data *d)
150 145
151static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type) 146static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
152{ 147{
153 struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d); 148 struct vf610_gpio_port *port =
149 to_vf610_gp(irq_data_get_irq_chip_data(d));
154 u8 irqc; 150 u8 irqc;
155 151
156 switch (type) { 152 switch (type) {
@@ -185,7 +181,8 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
185 181
186static void vf610_gpio_irq_mask(struct irq_data *d) 182static void vf610_gpio_irq_mask(struct irq_data *d)
187{ 183{
188 struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d); 184 struct vf610_gpio_port *port =
185 to_vf610_gp(irq_data_get_irq_chip_data(d));
189 void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq); 186 void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
190 187
191 vf610_gpio_writel(0, pcr_base); 188 vf610_gpio_writel(0, pcr_base);
@@ -193,7 +190,8 @@ static void vf610_gpio_irq_mask(struct irq_data *d)
193 190
194static void vf610_gpio_irq_unmask(struct irq_data *d) 191static void vf610_gpio_irq_unmask(struct irq_data *d)
195{ 192{
196 struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d); 193 struct vf610_gpio_port *port =
194 to_vf610_gp(irq_data_get_irq_chip_data(d));
197 void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq); 195 void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
198 196
199 vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET, 197 vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
@@ -202,7 +200,8 @@ static void vf610_gpio_irq_unmask(struct irq_data *d)
202 200
203static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable) 201static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
204{ 202{
205 struct vf610_gpio_port *port = irq_data_get_irq_chip_data(d); 203 struct vf610_gpio_port *port =
204 to_vf610_gp(irq_data_get_irq_chip_data(d));
206 205
207 if (enable) 206 if (enable)
208 enable_irq_wake(port->irq); 207 enable_irq_wake(port->irq);
@@ -255,8 +254,8 @@ static int vf610_gpio_probe(struct platform_device *pdev)
255 gc->ngpio = VF610_GPIO_PER_PORT; 254 gc->ngpio = VF610_GPIO_PER_PORT;
256 gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT; 255 gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
257 256
258 gc->request = vf610_gpio_request; 257 gc->request = gpiochip_generic_request;
259 gc->free = vf610_gpio_free; 258 gc->free = gpiochip_generic_free;
260 gc->direction_input = vf610_gpio_direction_input; 259 gc->direction_input = vf610_gpio_direction_input;
261 gc->get = vf610_gpio_get; 260 gc->get = vf610_gpio_get;
262 gc->direction_output = vf610_gpio_direction_output; 261 gc->direction_output = vf610_gpio_direction_output;
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index e02499a15e72..bc06a2cd2c1d 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/irqchip/chained_irq.h>
21 22
22/* 23/*
23 * XLP GPIO has multiple 32 bit registers for each feature where each register 24 * XLP GPIO has multiple 32 bit registers for each feature where each register
@@ -208,25 +209,28 @@ static struct irq_chip xlp_gpio_irq_chip = {
208 .flags = IRQCHIP_ONESHOT_SAFE, 209 .flags = IRQCHIP_ONESHOT_SAFE,
209}; 210};
210 211
211static irqreturn_t xlp_gpio_generic_handler(int irq, void *data) 212static void xlp_gpio_generic_handler(struct irq_desc *desc)
212{ 213{
213 struct xlp_gpio_priv *priv = data; 214 struct xlp_gpio_priv *priv = irq_desc_get_handler_data(desc);
215 struct irq_chip *irqchip = irq_desc_get_chip(desc);
214 int gpio, regoff; 216 int gpio, regoff;
215 u32 gpio_stat; 217 u32 gpio_stat;
216 218
217 regoff = -1; 219 regoff = -1;
218 gpio_stat = 0; 220 gpio_stat = 0;
221
222 chained_irq_enter(irqchip, desc);
219 for_each_set_bit(gpio, priv->gpio_enabled_mask, XLP_MAX_NR_GPIO) { 223 for_each_set_bit(gpio, priv->gpio_enabled_mask, XLP_MAX_NR_GPIO) {
220 if (regoff != gpio / XLP_GPIO_REGSZ) { 224 if (regoff != gpio / XLP_GPIO_REGSZ) {
221 regoff = gpio / XLP_GPIO_REGSZ; 225 regoff = gpio / XLP_GPIO_REGSZ;
222 gpio_stat = readl(priv->gpio_intr_stat + regoff * 4); 226 gpio_stat = readl(priv->gpio_intr_stat + regoff * 4);
223 } 227 }
228
224 if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ)) 229 if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ))
225 generic_handle_irq(irq_find_mapping( 230 generic_handle_irq(irq_find_mapping(
226 priv->chip.irqdomain, gpio)); 231 priv->chip.irqdomain, gpio));
227 } 232 }
228 233 chained_irq_exit(irqchip, desc);
229 return IRQ_HANDLED;
230} 234}
231 235
232static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state) 236static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state)
@@ -378,12 +382,6 @@ static int xlp_gpio_probe(struct platform_device *pdev)
378 gc->get = xlp_gpio_get; 382 gc->get = xlp_gpio_get;
379 383
380 spin_lock_init(&priv->lock); 384 spin_lock_init(&priv->lock);
381
382 err = devm_request_irq(&pdev->dev, irq, xlp_gpio_generic_handler,
383 IRQ_TYPE_NONE, pdev->name, priv);
384 if (err)
385 return err;
386
387 irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0); 385 irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
388 if (irq_base < 0) { 386 if (irq_base < 0) {
389 dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); 387 dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
@@ -401,6 +399,9 @@ static int xlp_gpio_probe(struct platform_device *pdev)
401 goto out_gpio_remove; 399 goto out_gpio_remove;
402 } 400 }
403 401
402 gpiochip_set_chained_irqchip(gc, &xlp_gpio_irq_chip, irq,
403 xlp_gpio_generic_handler);
404
404 dev_info(&pdev->dev, "registered %d GPIOs\n", gc->ngpio); 405 dev_info(&pdev->dev, "registered %d GPIOs\n", gc->ngpio);
405 406
406 return 0; 407 return 0;
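The gpio-xlp conversion above replaces a plain devm_request_irq() handler with a chained flow handler installed through gpiochip_set_chained_irqchip(). The shape of that pattern, reduced to its essentials, is sketched below; the foo_* names and the single 32-bit status register are illustrative, the chained_irq_enter/exit bracketing and irq_find_mapping() lookup are the point.

    #include <linux/bitops.h>
    #include <linux/gpio/driver.h>
    #include <linux/io.h>
    #include <linux/irqchip/chained_irq.h>
    #include <linux/irqdesc.h>
    #include <linux/irqdomain.h>

    struct foo_gpio {
    	struct gpio_chip chip;
    	void __iomem *status_reg;
    };

    static void foo_gpio_irq_handler(struct irq_desc *desc)
    {
    	struct foo_gpio *priv = irq_desc_get_handler_data(desc);
    	struct irq_chip *irqchip = irq_desc_get_chip(desc);
    	unsigned long pending;
    	int bit;

    	chained_irq_enter(irqchip, desc);	/* ack/mask at the parent */
    	pending = readl(priv->status_reg);
    	for_each_set_bit(bit, &pending, 32)
    		generic_handle_irq(irq_find_mapping(priv->chip.irqdomain, bit));
    	chained_irq_exit(irqchip, desc);	/* unmask/eoi at the parent */
    }

    /* installed from probe, after gpiochip_irqchip_add():
     *   gpiochip_set_chained_irqchip(&priv->chip, &foo_irq_chip,
     *                                parent_irq, foo_gpio_irq_handler);
     */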
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 4b8a26910705..1dcf7a66dd36 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -41,7 +41,6 @@ struct zx_gpio {
41 41
42 void __iomem *base; 42 void __iomem *base;
43 struct gpio_chip gc; 43 struct gpio_chip gc;
44 bool uses_pinctrl;
45}; 44};
46 45
47static inline struct zx_gpio *to_zx(struct gpio_chip *gc) 46static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
@@ -49,25 +48,6 @@ static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
49 return container_of(gc, struct zx_gpio, gc); 48 return container_of(gc, struct zx_gpio, gc);
50} 49}
51 50
52static int zx_gpio_request(struct gpio_chip *gc, unsigned offset)
53{
54 struct zx_gpio *chip = to_zx(gc);
55 int gpio = gc->base + offset;
56
57 if (chip->uses_pinctrl)
58 return pinctrl_request_gpio(gpio);
59 return 0;
60}
61
62static void zx_gpio_free(struct gpio_chip *gc, unsigned offset)
63{
64 struct zx_gpio *chip = to_zx(gc);
65 int gpio = gc->base + offset;
66
67 if (chip->uses_pinctrl)
68 pinctrl_free_gpio(gpio);
69}
70
71static int zx_direction_input(struct gpio_chip *gc, unsigned offset) 51static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
72{ 52{
73 struct zx_gpio *chip = to_zx(gc); 53 struct zx_gpio *chip = to_zx(gc);
@@ -252,12 +232,12 @@ static int zx_gpio_probe(struct platform_device *pdev)
252 return PTR_ERR(chip->base); 232 return PTR_ERR(chip->base);
253 233
254 spin_lock_init(&chip->lock); 234 spin_lock_init(&chip->lock);
255 if (of_property_read_bool(dev->of_node, "gpio-ranges")) 235 if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
256 chip->uses_pinctrl = true; 236 chip->gc.request = gpiochip_generic_request;
237 chip->gc.free = gpiochip_generic_free;
238 }
257 239
258 id = of_alias_get_id(dev->of_node, "gpio"); 240 id = of_alias_get_id(dev->of_node, "gpio");
259 chip->gc.request = zx_gpio_request;
260 chip->gc.free = zx_gpio_free;
261 chip->gc.direction_input = zx_direction_input; 241 chip->gc.direction_input = zx_direction_input;
262 chip->gc.direction_output = zx_direction_output; 242 chip->gc.direction_output = zx_direction_output;
263 chip->gc.get = zx_get_value; 243 chip->gc.get = zx_get_value;
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 1d1a5865ede9..8abeacac5885 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -130,6 +130,12 @@ struct zynq_platform_data {
130 130
131static struct irq_chip zynq_gpio_level_irqchip; 131static struct irq_chip zynq_gpio_level_irqchip;
132static struct irq_chip zynq_gpio_edge_irqchip; 132static struct irq_chip zynq_gpio_edge_irqchip;
133
134static struct zynq_gpio *to_zynq_gpio(struct gpio_chip *gc)
135{
136 return container_of(gc, struct zynq_gpio, chip);
137}
138
133/** 139/**
134 * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank 140 * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
135 * for a given pin in the GPIO device 141 * for a given pin in the GPIO device
@@ -177,7 +183,7 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
177{ 183{
178 u32 data; 184 u32 data;
179 unsigned int bank_num, bank_pin_num; 185 unsigned int bank_num, bank_pin_num;
180 struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); 186 struct zynq_gpio *gpio = to_zynq_gpio(chip);
181 187
182 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); 188 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
183 189
@@ -201,7 +207,7 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
201 int state) 207 int state)
202{ 208{
203 unsigned int reg_offset, bank_num, bank_pin_num; 209 unsigned int reg_offset, bank_num, bank_pin_num;
204 struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); 210 struct zynq_gpio *gpio = to_zynq_gpio(chip);
205 211
206 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); 212 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
207 213
@@ -238,7 +244,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
238{ 244{
239 u32 reg; 245 u32 reg;
240 unsigned int bank_num, bank_pin_num; 246 unsigned int bank_num, bank_pin_num;
241 struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); 247 struct zynq_gpio *gpio = to_zynq_gpio(chip);
242 248
243 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); 249 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
244 250
@@ -271,7 +277,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
271{ 277{
272 u32 reg; 278 u32 reg;
273 unsigned int bank_num, bank_pin_num; 279 unsigned int bank_num, bank_pin_num;
274 struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); 280 struct zynq_gpio *gpio = to_zynq_gpio(chip);
275 281
276 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); 282 zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
277 283
@@ -301,7 +307,8 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
301static void zynq_gpio_irq_mask(struct irq_data *irq_data) 307static void zynq_gpio_irq_mask(struct irq_data *irq_data)
302{ 308{
303 unsigned int device_pin_num, bank_num, bank_pin_num; 309 unsigned int device_pin_num, bank_num, bank_pin_num;
304 struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); 310 struct zynq_gpio *gpio =
311 to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
305 312
306 device_pin_num = irq_data->hwirq; 313 device_pin_num = irq_data->hwirq;
307 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); 314 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -321,7 +328,8 @@ static void zynq_gpio_irq_mask(struct irq_data *irq_data)
321static void zynq_gpio_irq_unmask(struct irq_data *irq_data) 328static void zynq_gpio_irq_unmask(struct irq_data *irq_data)
322{ 329{
323 unsigned int device_pin_num, bank_num, bank_pin_num; 330 unsigned int device_pin_num, bank_num, bank_pin_num;
324 struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); 331 struct zynq_gpio *gpio =
332 to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
325 333
326 device_pin_num = irq_data->hwirq; 334 device_pin_num = irq_data->hwirq;
327 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); 335 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -340,7 +348,8 @@ static void zynq_gpio_irq_unmask(struct irq_data *irq_data)
340static void zynq_gpio_irq_ack(struct irq_data *irq_data) 348static void zynq_gpio_irq_ack(struct irq_data *irq_data)
341{ 349{
342 unsigned int device_pin_num, bank_num, bank_pin_num; 350 unsigned int device_pin_num, bank_num, bank_pin_num;
343 struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); 351 struct zynq_gpio *gpio =
352 to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
344 353
345 device_pin_num = irq_data->hwirq; 354 device_pin_num = irq_data->hwirq;
346 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); 355 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -390,7 +399,8 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
390{ 399{
391 u32 int_type, int_pol, int_any; 400 u32 int_type, int_pol, int_any;
392 unsigned int device_pin_num, bank_num, bank_pin_num; 401 unsigned int device_pin_num, bank_num, bank_pin_num;
393 struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); 402 struct zynq_gpio *gpio =
403 to_zynq_gpio(irq_data_get_irq_chip_data(irq_data));
394 404
395 device_pin_num = irq_data->hwirq; 405 device_pin_num = irq_data->hwirq;
396 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); 406 zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio);
@@ -453,7 +463,8 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
453 463
454static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on) 464static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
455{ 465{
456 struct zynq_gpio *gpio = irq_data_get_irq_chip_data(data); 466 struct zynq_gpio *gpio =
467 to_zynq_gpio(irq_data_get_irq_chip_data(data));
457 468
458 irq_set_irq_wake(gpio->irq, on); 469 irq_set_irq_wake(gpio->irq, on);
459 470
@@ -518,7 +529,8 @@ static void zynq_gpio_irqhandler(struct irq_desc *desc)
518{ 529{
519 u32 int_sts, int_enb; 530 u32 int_sts, int_enb;
520 unsigned int bank_num; 531 unsigned int bank_num;
521 struct zynq_gpio *gpio = irq_desc_get_handler_data(desc); 532 struct zynq_gpio *gpio =
533 to_zynq_gpio(irq_desc_get_handler_data(desc));
522 struct irq_chip *irqchip = irq_desc_get_chip(desc); 534 struct irq_chip *irqchip = irq_desc_get_chip(desc);
523 535
524 chained_irq_enter(irqchip, desc); 536 chained_irq_enter(irqchip, desc);
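The zynq changes are mechanical: every open-coded container_of() is funnelled through the new to_zynq_gpio() helper. For reference, the idiom looks like this generic sketch (foo names illustrative):

    #include <linux/gpio/driver.h>
    #include <linux/kernel.h>

    struct foo_gpio {
    	void __iomem *base;
    	struct gpio_chip chip;	/* embedded, not a pointer */
    };

    /* recover the driver-private wrapper from the gpio_chip pointer the
     * core hands back, e.g. via irq_data_get_irq_chip_data() */
    static inline struct foo_gpio *to_foo_gpio(struct gpio_chip *gc)
    {
    	return container_of(gc, struct foo_gpio, chip);
    }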
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 143a9bdbaa53..bbcac3af2a7a 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -304,7 +304,6 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
304 if (ACPI_FAILURE(status)) 304 if (ACPI_FAILURE(status))
305 return; 305 return;
306 306
307 INIT_LIST_HEAD(&acpi_gpio->events);
308 acpi_walk_resources(handle, "_AEI", 307 acpi_walk_resources(handle, "_AEI",
309 acpi_gpiochip_request_interrupt, acpi_gpio); 308 acpi_gpiochip_request_interrupt, acpi_gpio);
310} 309}
@@ -603,6 +602,25 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
603 break; 602 break;
604 } 603 }
605 } 604 }
605
606 /*
607 * The same GPIO can be shared between operation region and
608 * event but only if the access here is ACPI_READ. In that
609 * case we "borrow" the event GPIO instead.
610 */
611 if (!found && agpio->sharable == ACPI_SHARED &&
612 function == ACPI_READ) {
613 struct acpi_gpio_event *event;
614
615 list_for_each_entry(event, &achip->events, node) {
616 if (event->pin == pin) {
617 desc = event->desc;
618 found = true;
619 break;
620 }
621 }
622 }
623
606 if (!found) { 624 if (!found) {
607 desc = gpiochip_request_own_desc(chip, pin, 625 desc = gpiochip_request_own_desc(chip, pin,
608 "ACPI:OpRegion"); 626 "ACPI:OpRegion");
@@ -719,6 +737,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
719 } 737 }
720 738
721 acpi_gpio->chip = chip; 739 acpi_gpio->chip = chip;
740 INIT_LIST_HEAD(&acpi_gpio->events);
722 741
723 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); 742 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
724 if (ACPI_FAILURE(status)) { 743 if (ACPI_FAILURE(status)) {
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 8b830996fe02..3a5c7011ad3b 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -28,10 +28,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
28 if (!desc && gpio_is_valid(gpio)) 28 if (!desc && gpio_is_valid(gpio))
29 return -EPROBE_DEFER; 29 return -EPROBE_DEFER;
30 30
31 err = gpiod_request(desc, label);
32 if (err)
33 return err;
34
35 if (flags & GPIOF_OPEN_DRAIN) 31 if (flags & GPIOF_OPEN_DRAIN)
36 set_bit(FLAG_OPEN_DRAIN, &desc->flags); 32 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
37 33
@@ -41,6 +37,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
41 if (flags & GPIOF_ACTIVE_LOW) 37 if (flags & GPIOF_ACTIVE_LOW)
42 set_bit(FLAG_ACTIVE_LOW, &desc->flags); 38 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
43 39
40 err = gpiod_request(desc, label);
41 if (err)
42 return err;
43
44 if (flags & GPIOF_DIR_IN) 44 if (flags & GPIOF_DIR_IN)
45 err = gpiod_direction_input(desc); 45 err = gpiod_direction_input(desc);
46 else 46 else
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index fa6e3c8823d6..5fe34a9df3e6 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -119,20 +119,20 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
119EXPORT_SYMBOL(of_get_named_gpio_flags); 119EXPORT_SYMBOL(of_get_named_gpio_flags);
120 120
121/** 121/**
122 * of_get_gpio_hog() - Get a GPIO hog descriptor, names and flags for GPIO API 122 * of_parse_own_gpio() - Get a GPIO hog descriptor, names and flags for GPIO API
123 * @np: device node to get GPIO from 123 * @np: device node to get GPIO from
124 * @name: GPIO line name 124 * @name: GPIO line name
125 * @lflags: gpio_lookup_flags - returned from of_find_gpio() or 125 * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
126 * of_get_gpio_hog() 126 * of_parse_own_gpio()
127 * @dflags: gpiod_flags - optional GPIO initialization flags 127 * @dflags: gpiod_flags - optional GPIO initialization flags
128 * 128 *
129 * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno 129 * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
130 * value on the error condition. 130 * value on the error condition.
131 */ 131 */
132static struct gpio_desc *of_get_gpio_hog(struct device_node *np, 132static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
133 const char **name, 133 const char **name,
134 enum gpio_lookup_flags *lflags, 134 enum gpio_lookup_flags *lflags,
135 enum gpiod_flags *dflags) 135 enum gpiod_flags *dflags)
136{ 136{
137 struct device_node *chip_np; 137 struct device_node *chip_np;
138 enum of_gpio_flags xlate_flags; 138 enum of_gpio_flags xlate_flags;
@@ -196,13 +196,13 @@ static struct gpio_desc *of_get_gpio_hog(struct device_node *np,
196} 196}
197 197
198/** 198/**
199 * of_gpiochip_scan_hogs - Scan gpio-controller and apply GPIO hog as requested 199 * of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
200 * @chip: gpio chip to act on 200 * @chip: gpio chip to act on
201 * 201 *
202 * This is only used by of_gpiochip_add to request/set GPIO initial 202 * This is only used by of_gpiochip_add to request/set GPIO initial
203 * configuration. 203 * configuration.
204 */ 204 */
205static void of_gpiochip_scan_hogs(struct gpio_chip *chip) 205static void of_gpiochip_scan_gpios(struct gpio_chip *chip)
206{ 206{
207 struct gpio_desc *desc = NULL; 207 struct gpio_desc *desc = NULL;
208 struct device_node *np; 208 struct device_node *np;
@@ -214,7 +214,7 @@ static void of_gpiochip_scan_hogs(struct gpio_chip *chip)
214 if (!of_property_read_bool(np, "gpio-hog")) 214 if (!of_property_read_bool(np, "gpio-hog"))
215 continue; 215 continue;
216 216
217 desc = of_get_gpio_hog(np, &name, &lflags, &dflags); 217 desc = of_parse_own_gpio(np, &name, &lflags, &dflags);
218 if (IS_ERR(desc)) 218 if (IS_ERR(desc))
219 continue; 219 continue;
220 220
@@ -440,7 +440,7 @@ int of_gpiochip_add(struct gpio_chip *chip)
440 440
441 of_node_get(chip->of_node); 441 of_node_get(chip->of_node);
442 442
443 of_gpiochip_scan_hogs(chip); 443 of_gpiochip_scan_gpios(chip);
444 444
445 return 0; 445 return 0;
446} 446}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5db3445552b1..6798355c61c6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -15,6 +15,7 @@
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <linux/gpio/driver.h> 16#include <linux/gpio/driver.h>
17#include <linux/gpio/machine.h> 17#include <linux/gpio/machine.h>
18#include <linux/pinctrl/consumer.h>
18 19
19#include "gpiolib.h" 20#include "gpiolib.h"
20 21
@@ -47,8 +48,6 @@
47 */ 48 */
48DEFINE_SPINLOCK(gpio_lock); 49DEFINE_SPINLOCK(gpio_lock);
49 50
50#define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio)
51
52static DEFINE_MUTEX(gpio_lookup_lock); 51static DEFINE_MUTEX(gpio_lookup_lock);
53static LIST_HEAD(gpio_lookup_list); 52static LIST_HEAD(gpio_lookup_list);
54LIST_HEAD(gpio_chips); 53LIST_HEAD(gpio_chips);
@@ -219,6 +218,68 @@ static int gpiochip_add_to_list(struct gpio_chip *chip)
219} 218}
220 219
221/** 220/**
221 * Convert a GPIO name to its descriptor
222 */
223static struct gpio_desc *gpio_name_to_desc(const char * const name)
224{
225 struct gpio_chip *chip;
226 unsigned long flags;
227
228 spin_lock_irqsave(&gpio_lock, flags);
229
230 list_for_each_entry(chip, &gpio_chips, list) {
231 int i;
232
233 for (i = 0; i != chip->ngpio; ++i) {
234 struct gpio_desc *gpio = &chip->desc[i];
235
236 if (!gpio->name)
237 continue;
238
239 if (!strcmp(gpio->name, name)) {
240 spin_unlock_irqrestore(&gpio_lock, flags);
241 return gpio;
242 }
243 }
244 }
245
246 spin_unlock_irqrestore(&gpio_lock, flags);
247
248 return NULL;
249}
250
251/*
252 * Takes the names from gc->names and checks if they are all unique. If they
253 * are, they are assigned to their gpio descriptors.
254 *
255 * Returns -EEXIST if one of the names is already used for a different GPIO.
256 */
257static int gpiochip_set_desc_names(struct gpio_chip *gc)
258{
259 int i;
260
261 if (!gc->names)
262 return 0;
263
264 /* First check all names if they are unique */
265 for (i = 0; i != gc->ngpio; ++i) {
266 struct gpio_desc *gpio;
267
268 gpio = gpio_name_to_desc(gc->names[i]);
269 if (gpio)
270 dev_warn(gc->dev, "Detected name collision for "
271 "GPIO name '%s'\n",
272 gc->names[i]);
273 }
274
275 /* Then add all names to the GPIO descriptors */
276 for (i = 0; i != gc->ngpio; ++i)
277 gc->desc[i].name = gc->names[i];
278
279 return 0;
280}
281
282/**
222 * gpiochip_add() - register a gpio_chip 283 * gpiochip_add() - register a gpio_chip
223 * @chip: the chip to register, with chip->base initialized 284 * @chip: the chip to register, with chip->base initialized
224 * Context: potentially before irqs will work 285 * Context: potentially before irqs will work
@@ -290,6 +351,10 @@ int gpiochip_add(struct gpio_chip *chip)
290 if (!chip->owner && chip->dev && chip->dev->driver) 351 if (!chip->owner && chip->dev && chip->dev->driver)
291 chip->owner = chip->dev->driver->owner; 352 chip->owner = chip->dev->driver->owner;
292 353
354 status = gpiochip_set_desc_names(chip);
355 if (status)
356 goto err_remove_from_list;
357
293 status = of_gpiochip_add(chip); 358 status = of_gpiochip_add(chip);
294 if (status) 359 if (status)
295 goto err_remove_chip; 360 goto err_remove_chip;
@@ -310,6 +375,7 @@ err_remove_chip:
310 acpi_gpiochip_remove(chip); 375 acpi_gpiochip_remove(chip);
311 gpiochip_free_hogs(chip); 376 gpiochip_free_hogs(chip);
312 of_gpiochip_remove(chip); 377 of_gpiochip_remove(chip);
378err_remove_from_list:
313 spin_lock_irqsave(&gpio_lock, flags); 379 spin_lock_irqsave(&gpio_lock, flags);
314 list_del(&chip->list); 380 list_del(&chip->list);
315 spin_unlock_irqrestore(&gpio_lock, flags); 381 spin_unlock_irqrestore(&gpio_lock, flags);
@@ -680,6 +746,28 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
680 746
681#endif /* CONFIG_GPIOLIB_IRQCHIP */ 747#endif /* CONFIG_GPIOLIB_IRQCHIP */
682 748
749/**
750 * gpiochip_generic_request() - request the gpio function for a pin
751 * @chip: the gpiochip owning the GPIO
752 * @offset: the offset of the GPIO to request for GPIO function
753 */
754int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
755{
756 return pinctrl_request_gpio(chip->base + offset);
757}
758EXPORT_SYMBOL_GPL(gpiochip_generic_request);
759
760/**
761 * gpiochip_generic_free() - free the gpio function from a pin
762 * @chip: the gpiochip to request the gpio function for
763 * @offset: the offset of the GPIO to free from GPIO function
764 */
765void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
766{
767 pinctrl_free_gpio(chip->base + offset);
768}
769EXPORT_SYMBOL_GPL(gpiochip_generic_free);
770
683#ifdef CONFIG_PINCTRL 771#ifdef CONFIG_PINCTRL
684 772
685/** 773/**
@@ -839,6 +927,14 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
839 spin_lock_irqsave(&gpio_lock, flags); 927 spin_lock_irqsave(&gpio_lock, flags);
840 } 928 }
841done: 929done:
930 if (status < 0) {
931 /* Clear flags that might have been set by the caller before
932 * requesting the GPIO.
933 */
934 clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
935 clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
936 clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
937 }
842 spin_unlock_irqrestore(&gpio_lock, flags); 938 spin_unlock_irqrestore(&gpio_lock, flags);
843 return status; 939 return status;
844} 940}
@@ -928,7 +1024,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
928{ 1024{
929 struct gpio_desc *desc; 1025 struct gpio_desc *desc;
930 1026
931 if (!GPIO_OFFSET_VALID(chip, offset)) 1027 if (offset >= chip->ngpio)
932 return NULL; 1028 return NULL;
933 1029
934 desc = &chip->desc[offset]; 1030 desc = &chip->desc[offset];
@@ -1735,6 +1831,13 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
1735 if (of_flags & OF_GPIO_ACTIVE_LOW) 1831 if (of_flags & OF_GPIO_ACTIVE_LOW)
1736 *flags |= GPIO_ACTIVE_LOW; 1832 *flags |= GPIO_ACTIVE_LOW;
1737 1833
1834 if (of_flags & OF_GPIO_SINGLE_ENDED) {
1835 if (of_flags & OF_GPIO_ACTIVE_LOW)
1836 *flags |= GPIO_OPEN_DRAIN;
1837 else
1838 *flags |= GPIO_OPEN_SOURCE;
1839 }
1840
1738 return desc; 1841 return desc;
1739} 1842}
1740 1843
@@ -1953,13 +2056,28 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
1953} 2056}
1954EXPORT_SYMBOL_GPL(gpiod_get_optional); 2057EXPORT_SYMBOL_GPL(gpiod_get_optional);
1955 2058
2059/**
2060 * gpiod_parse_flags - helper function to parse GPIO lookup flags
2061 * @desc: gpio to be setup
2062 * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
2063 * of_get_gpio_hog()
2064 *
2065 * Set the GPIO descriptor flags based on the given GPIO lookup flags.
2066 */
2067static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
2068{
2069 if (lflags & GPIO_ACTIVE_LOW)
2070 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
2071 if (lflags & GPIO_OPEN_DRAIN)
2072 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
2073 if (lflags & GPIO_OPEN_SOURCE)
2074 set_bit(FLAG_OPEN_SOURCE, &desc->flags);
2075}
1956 2076
1957/** 2077/**
1958 * gpiod_configure_flags - helper function to configure a given GPIO 2078 * gpiod_configure_flags - helper function to configure a given GPIO
1959 * @desc: gpio whose value will be assigned 2079 * @desc: gpio whose value will be assigned
1960 * @con_id: function within the GPIO consumer 2080 * @con_id: function within the GPIO consumer
1961 * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
1962 * of_get_gpio_hog()
1963 * @dflags: gpiod_flags - optional GPIO initialization flags 2081 * @dflags: gpiod_flags - optional GPIO initialization flags
1964 * 2082 *
1965 * Return 0 on success, -ENOENT if no GPIO has been assigned to the 2083 * Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -1967,17 +2085,10 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
1967 * occurred while trying to acquire the GPIO. 2085 * occurred while trying to acquire the GPIO.
1968 */ 2086 */
1969static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, 2087static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
1970 unsigned long lflags, enum gpiod_flags dflags) 2088 enum gpiod_flags dflags)
1971{ 2089{
1972 int status; 2090 int status;
1973 2091
1974 if (lflags & GPIO_ACTIVE_LOW)
1975 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1976 if (lflags & GPIO_OPEN_DRAIN)
1977 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1978 if (lflags & GPIO_OPEN_SOURCE)
1979 set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1980
1981 /* No particular flag request, return here... */ 2092 /* No particular flag request, return here... */
1982 if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) { 2093 if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
1983 pr_debug("no flags found for %s\n", con_id); 2094 pr_debug("no flags found for %s\n", con_id);
@@ -2044,11 +2155,13 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2044 return desc; 2155 return desc;
2045 } 2156 }
2046 2157
2158 gpiod_parse_flags(desc, lookupflags);
2159
2047 status = gpiod_request(desc, con_id); 2160 status = gpiod_request(desc, con_id);
2048 if (status < 0) 2161 if (status < 0)
2049 return ERR_PTR(status); 2162 return ERR_PTR(status);
2050 2163
2051 status = gpiod_configure_flags(desc, con_id, lookupflags, flags); 2164 status = gpiod_configure_flags(desc, con_id, flags);
2052 if (status < 0) { 2165 if (status < 0) {
2053 dev_dbg(dev, "setup of GPIO %s failed\n", con_id); 2166 dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
2054 gpiod_put(desc); 2167 gpiod_put(desc);
@@ -2078,6 +2191,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
2078{ 2191{
2079 struct gpio_desc *desc = ERR_PTR(-ENODEV); 2192 struct gpio_desc *desc = ERR_PTR(-ENODEV);
2080 bool active_low = false; 2193 bool active_low = false;
2194 bool single_ended = false;
2081 int ret; 2195 int ret;
2082 2196
2083 if (!fwnode) 2197 if (!fwnode)
@@ -2088,8 +2202,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
2088 2202
2089 desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname, 0, 2203 desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname, 0,
2090 &flags); 2204 &flags);
2091 if (!IS_ERR(desc)) 2205 if (!IS_ERR(desc)) {
2092 active_low = flags & OF_GPIO_ACTIVE_LOW; 2206 active_low = flags & OF_GPIO_ACTIVE_LOW;
2207 single_ended = flags & OF_GPIO_SINGLE_ENDED;
2208 }
2093 } else if (is_acpi_node(fwnode)) { 2209 } else if (is_acpi_node(fwnode)) {
2094 struct acpi_gpio_info info; 2210 struct acpi_gpio_info info;
2095 2211
@@ -2102,14 +2218,20 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
2102 if (IS_ERR(desc)) 2218 if (IS_ERR(desc))
2103 return desc; 2219 return desc;
2104 2220
2221 if (active_low)
2222 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
2223
2224 if (single_ended) {
2225 if (active_low)
2226 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
2227 else
2228 set_bit(FLAG_OPEN_SOURCE, &desc->flags);
2229 }
2230
2105 ret = gpiod_request(desc, NULL); 2231 ret = gpiod_request(desc, NULL);
2106 if (ret) 2232 if (ret)
2107 return ERR_PTR(ret); 2233 return ERR_PTR(ret);
2108 2234
2109 /* Only value flag can be set from both DT and ACPI is active_low */
2110 if (active_low)
2111 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
2112
2113 return desc; 2235 return desc;
2114} 2236}
2115EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod); 2237EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2162,6 +2284,8 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
2162 chip = gpiod_to_chip(desc); 2284 chip = gpiod_to_chip(desc);
2163 hwnum = gpio_chip_hwgpio(desc); 2285 hwnum = gpio_chip_hwgpio(desc);
2164 2286
2287 gpiod_parse_flags(desc, lflags);
2288
2165 local_desc = gpiochip_request_own_desc(chip, hwnum, name); 2289 local_desc = gpiochip_request_own_desc(chip, hwnum, name);
2166 if (IS_ERR(local_desc)) { 2290 if (IS_ERR(local_desc)) {
2167 pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n", 2291 pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
@@ -2169,7 +2293,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
2169 return PTR_ERR(local_desc); 2293 return PTR_ERR(local_desc);
2170 } 2294 }
2171 2295
2172 status = gpiod_configure_flags(desc, name, lflags, dflags); 2296 status = gpiod_configure_flags(desc, name, dflags);
2173 if (status < 0) { 2297 if (status < 0) {
2174 pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n", 2298 pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
2175 name, chip->label, hwnum); 2299 name, chip->label, hwnum);
@@ -2309,14 +2433,19 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
2309 int is_irq; 2433 int is_irq;
2310 2434
2311 for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) { 2435 for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
2312 if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) 2436 if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) {
2437 if (gdesc->name) {
2438 seq_printf(s, " gpio-%-3d (%-20.20s)\n",
2439 gpio, gdesc->name);
2440 }
2313 continue; 2441 continue;
2442 }
2314 2443
2315 gpiod_get_direction(gdesc); 2444 gpiod_get_direction(gdesc);
2316 is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); 2445 is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
2317 is_irq = test_bit(FLAG_USED_AS_IRQ, &gdesc->flags); 2446 is_irq = test_bit(FLAG_USED_AS_IRQ, &gdesc->flags);
2318 seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s", 2447 seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s",
2319 gpio, gdesc->label, 2448 gpio, gdesc->name ? gdesc->name : "", gdesc->label,
2320 is_out ? "out" : "in ", 2449 is_out ? "out" : "in ",
2321 chip->get 2450 chip->get
2322 ? (chip->get(chip, i) ? "hi" : "lo") 2451 ? (chip->get(chip, i) ? "hi" : "lo")
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index bf343004b008..78e634d1c719 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -89,7 +89,10 @@ struct gpio_desc {
89#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ 89#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
90#define FLAG_IS_HOGGED 11 /* GPIO is hogged */ 90#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
91 91
92 /* Connection label */
92 const char *label; 93 const char *label;
94 /* Name of the GPIO */
95 const char *name;
93}; 96};
94 97
95int gpiod_request(struct gpio_desc *desc, const char *label); 98int gpiod_request(struct gpio_desc *desc, const char *label);
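The gpiolib.c and gpiolib.h hunks above give every descriptor an optional per-line name, checked for collisions when the chip is registered and printed in the debugfs listing. A hedged sketch of how a driver could supply that array (the line names themselves are invented):

    #include <linux/gpio/driver.h>
    #include <linux/kernel.h>

    static const char * const foo_gpio_names[] = {
    	"power-led", "sd-card-detect", "wifi-reset", "user-button",
    };

    static void foo_gpio_setup_names(struct gpio_chip *gc)
    {
    	/* gpiochip_set_desc_names() copies these into desc[i].name at
    	 * gpiochip_add() time and warns if a name is already in use on
    	 * another chip */
    	gc->ngpio = ARRAY_SIZE(foo_gpio_names);
    	gc->names = foo_gpio_names;
    }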
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6647fb26ef25..0d13e6368b96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1654,6 +1654,7 @@ struct amdgpu_pm {
1654 u8 fan_max_rpm; 1654 u8 fan_max_rpm;
1655 /* dpm */ 1655 /* dpm */
1656 bool dpm_enabled; 1656 bool dpm_enabled;
1657 bool sysfs_initialized;
1657 struct amdgpu_dpm dpm; 1658 struct amdgpu_dpm dpm;
1658 const struct firmware *fw; /* SMC firmware */ 1659 const struct firmware *fw; /* SMC firmware */
1659 uint32_t fw_version; 1660 uint32_t fw_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index dc29ed8145c2..6c9e0902a414 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -184,10 +184,6 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
184 goto cleanup; 184 goto cleanup;
185 } 185 }
186 186
187 fence_get(work->excl);
188 for (i = 0; i < work->shared_count; ++i)
189 fence_get(work->shared[i]);
190
191 amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); 187 amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
192 amdgpu_bo_unreserve(new_rbo); 188 amdgpu_bo_unreserve(new_rbo);
193 189
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index efed11509f4a..22a8c7d3a3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
294 struct amdgpu_device *adev = dev_get_drvdata(dev); 294 struct amdgpu_device *adev = dev_get_drvdata(dev);
295 umode_t effective_mode = attr->mode; 295 umode_t effective_mode = attr->mode;
296 296
297 /* Skip limit attributes if DPM is not enabled */ 297 /* Skip attributes if DPM is not enabled */
298 if (!adev->pm.dpm_enabled && 298 if (!adev->pm.dpm_enabled &&
299 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 299 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
300 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 300 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
301 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
302 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
303 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
304 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
301 return 0; 305 return 0;
302 306
303 /* Skip fan attributes if fan is not present */ 307 /* Skip fan attributes if fan is not present */
@@ -691,6 +695,9 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
691{ 695{
692 int ret; 696 int ret;
693 697
698 if (adev->pm.sysfs_initialized)
699 return 0;
700
694 if (adev->pm.funcs->get_temperature == NULL) 701 if (adev->pm.funcs->get_temperature == NULL)
695 return 0; 702 return 0;
696 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 703 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
@@ -719,6 +726,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
719 return ret; 726 return ret;
720 } 727 }
721 728
729 adev->pm.sysfs_initialized = true;
730
722 return 0; 731 return 0;
723} 732}
724 733
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 9745ed3a9aef..7e9154c7f1db 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2997,6 +2997,9 @@ static int kv_dpm_late_init(void *handle)
2997 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2997 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2998 int ret; 2998 int ret;
2999 2999
3000 if (!amdgpu_dpm)
3001 return 0;
3002
3000 /* init the sysfs and debugfs files late */ 3003 /* init the sysfs and debugfs files late */
3001 ret = amdgpu_pm_sysfs_init(adev); 3004 ret = amdgpu_pm_sysfs_init(adev);
3002 if (ret) 3005 if (ret)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 33d877c65ced..8328e7059205 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -4105,7 +4105,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
4105 struct drm_property_blob *blob; 4105 struct drm_property_blob *blob;
4106 int ret; 4106 int ret;
4107 4107
4108 if (!length) 4108 if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
4109 return ERR_PTR(-EINVAL); 4109 return ERR_PTR(-EINVAL);
4110 4110
4111 blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); 4111 blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
@@ -4454,7 +4454,7 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
4454 * not associated with any file_priv. */ 4454 * not associated with any file_priv. */
4455 mutex_lock(&dev->mode_config.blob_lock); 4455 mutex_lock(&dev->mode_config.blob_lock);
4456 out_resp->blob_id = blob->base.id; 4456 out_resp->blob_id = blob->base.id;
4457 list_add_tail(&file_priv->blobs, &blob->head_file); 4457 list_add_tail(&blob->head_file, &file_priv->blobs);
4458 mutex_unlock(&dev->mode_config.blob_lock); 4458 mutex_unlock(&dev->mode_config.blob_lock);
4459 4459
4460 return 0; 4460 return 0;
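The drm_mode_createblob_ioctl fix above is purely about argument order: list_add_tail(new, head) inserts its first argument as an element onto the list whose head is the second argument, so the blob's head_file node must come first and the per-file blobs list second. A generic sketch of the correct orientation (struct names are illustrative, only list_add_tail() is the real API):

    #include <linux/list.h>

    struct blob {
    	struct list_head head_file;	/* links one blob into a file's list */
    };

    struct file_state {
    	struct list_head blobs;		/* list head: all blobs of this file */
    };

    static void track_blob(struct file_state *fs, struct blob *b)
    {
    	/* element first, list head second; swapping them corrupts the
    	 * per-file list head, which is what the patch corrects */
    	list_add_tail(&b->head_file, &fs->blobs);
    }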
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5bca390d9ae2..809959d56d78 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1194,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1194 1194
1195 list_for_each_entry(port, &mstb->ports, next) { 1195 list_for_each_entry(port, &mstb->ports, next) {
1196 if (port->port_num == port_num) { 1196 if (port->port_num == port_num) {
1197 if (!port->mstb) { 1197 mstb = port->mstb;
1198 if (!mstb) {
1198 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); 1199 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1199 return NULL; 1200 goto out;
1200 } 1201 }
1201 1202
1202 mstb = port->mstb;
1203 break; 1203 break;
1204 } 1204 }
1205 } 1205 }
1206 } 1206 }
1207 kref_get(&mstb->kref); 1207 kref_get(&mstb->kref);
1208out:
1208 mutex_unlock(&mgr->lock); 1209 mutex_unlock(&mgr->lock);
1209 return mstb; 1210 return mstb;
1210} 1211}
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f6ecbda2c604..674341708033 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -143,7 +143,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
143} 143}
144 144
145/** 145/**
146 * i915_gem_shrink - Shrink buffer object caches completely 146 * i915_gem_shrink_all - Shrink buffer object caches completely
147 * @dev_priv: i915 device 147 * @dev_priv: i915 device
148 * 148 *
149 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all 149 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 8fd431bcdfd3..a96b9006a51e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -804,7 +804,10 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
804 * Also note, that the object created here is not currently a "first class" 804 * Also note, that the object created here is not currently a "first class"
805 * object, in that several ioctls are banned. These are the CPU access 805 * object, in that several ioctls are banned. These are the CPU access
806 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use 806 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
807 * direct access via your pointer rather than use those ioctls. 807 * direct access via your pointer rather than use those ioctls. Another
808 * restriction is that we do not allow userptr surfaces to be pinned to the
809 * hardware and so we reject any attempt to create a framebuffer out of a
810 * userptr.
808 * 811 *
809 * If you think this is a good interface to use to pass GPU memory between 812 * If you think this is a good interface to use to pass GPU memory between
810 * drivers, please use dma-buf instead. In fact, wherever possible use 813 * drivers, please use dma-buf instead. In fact, wherever possible use
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cf418be7d30a..b2270d576979 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1724,6 +1724,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1724 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1724 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1725 } 1725 }
1726 1726
1727 /*
1728 * Apparently we need to have VGA mode enabled prior to changing
1729 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1730 * dividers, even though the register value does change.
1731 */
1732 I915_WRITE(reg, 0);
1733
1734 I915_WRITE(reg, dpll);
1735
1727 /* Wait for the clocks to stabilize. */ 1736 /* Wait for the clocks to stabilize. */
1728 POSTING_READ(reg); 1737 POSTING_READ(reg);
1729 udelay(150); 1738 udelay(150);
@@ -14107,6 +14116,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14107 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14116 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14108 struct drm_i915_gem_object *obj = intel_fb->obj; 14117 struct drm_i915_gem_object *obj = intel_fb->obj;
14109 14118
14119 if (obj->userptr.mm) {
14120 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14121 return -EINVAL;
14122 }
14123
14110 return drm_gem_handle_create(file, &obj->base, handle); 14124 return drm_gem_handle_create(file, &obj->base, handle);
14111} 14125}
14112 14126
@@ -14897,9 +14911,19 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
14897 /* restore vblank interrupts to correct state */ 14911 /* restore vblank interrupts to correct state */
14898 drm_crtc_vblank_reset(&crtc->base); 14912 drm_crtc_vblank_reset(&crtc->base);
14899 if (crtc->active) { 14913 if (crtc->active) {
14914 struct intel_plane *plane;
14915
14900 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); 14916 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
14901 update_scanline_offset(crtc); 14917 update_scanline_offset(crtc);
14902 drm_crtc_vblank_on(&crtc->base); 14918 drm_crtc_vblank_on(&crtc->base);
14919
14920 /* Disable everything but the primary plane */
14921 for_each_intel_plane_on_crtc(dev, crtc, plane) {
14922 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
14923 continue;
14924
14925 plane->disable_plane(&plane->base, &crtc->base);
14926 }
14903 } 14927 }
14904 14928
14905 /* We need to sanitize the plane -> pipe mapping first because this will 14929 /* We need to sanitize the plane -> pipe mapping first because this will
@@ -15067,38 +15091,25 @@ void i915_redisable_vga(struct drm_device *dev)
15067 i915_redisable_vga_power_on(dev); 15091 i915_redisable_vga_power_on(dev);
15068} 15092}
15069 15093
15070static bool primary_get_hw_state(struct intel_crtc *crtc) 15094static bool primary_get_hw_state(struct intel_plane *plane)
15071{ 15095{
15072 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 15096 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15073 15097
15074 return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE); 15098 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15075} 15099}
15076 15100
15077static void readout_plane_state(struct intel_crtc *crtc, 15101/* FIXME read out full plane state for all planes */
15078 struct intel_crtc_state *crtc_state) 15102static void readout_plane_state(struct intel_crtc *crtc)
15079{ 15103{
15080 struct intel_plane *p; 15104 struct drm_plane *primary = crtc->base.primary;
15081 struct intel_plane_state *plane_state; 15105 struct intel_plane_state *plane_state =
15082 bool active = crtc_state->base.active; 15106 to_intel_plane_state(primary->state);
15083
15084 for_each_intel_plane(crtc->base.dev, p) {
15085 if (crtc->pipe != p->pipe)
15086 continue;
15087
15088 plane_state = to_intel_plane_state(p->base.state);
15089 15107
15090 if (p->base.type == DRM_PLANE_TYPE_PRIMARY) { 15108 plane_state->visible =
15091 plane_state->visible = primary_get_hw_state(crtc); 15109 primary_get_hw_state(to_intel_plane(primary));
15092 if (plane_state->visible)
15093 crtc->base.state->plane_mask |=
15094 1 << drm_plane_index(&p->base);
15095 } else {
15096 if (active)
15097 p->disable_plane(&p->base, &crtc->base);
15098 15110
15099 plane_state->visible = false; 15111 if (plane_state->visible)
15100 } 15112 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15101 }
15102} 15113}
15103 15114
15104static void intel_modeset_readout_hw_state(struct drm_device *dev) 15115static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15121,34 +15132,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15121 crtc->base.state->active = crtc->active; 15132 crtc->base.state->active = crtc->active;
15122 crtc->base.enabled = crtc->active; 15133 crtc->base.enabled = crtc->active;
15123 15134
15124 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15135 readout_plane_state(crtc);
15125 if (crtc->base.state->active) {
15126 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15127 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15128 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15129
15130 /*
15131 * The initial mode needs to be set in order to keep
15132 * the atomic core happy. It wants a valid mode if the
15133 * crtc's enabled, so we do the above call.
15134 *
15135 * At this point some state updated by the connectors
15136 * in their ->detect() callback has not run yet, so
15137 * no recalculation can be done yet.
15138 *
15139 * Even if we could do a recalculation and modeset
15140 * right now it would cause a double modeset if
15141 * fbdev or userspace chooses a different initial mode.
15142 *
15143 * If that happens, someone indicated they wanted a
15144 * mode change, which means it's safe to do a full
15145 * recalculation.
15146 */
15147 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15148 }
15149
15150 crtc->base.hwmode = crtc->config->base.adjusted_mode;
15151 readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
15152 15136
15153 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 15137 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15154 crtc->base.base.id, 15138 crtc->base.base.id,
@@ -15207,6 +15191,36 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15207 connector->base.name, 15191 connector->base.name,
15208 connector->base.encoder ? "enabled" : "disabled"); 15192 connector->base.encoder ? "enabled" : "disabled");
15209 } 15193 }
15194
15195 for_each_intel_crtc(dev, crtc) {
15196 crtc->base.hwmode = crtc->config->base.adjusted_mode;
15197
15198 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15199 if (crtc->base.state->active) {
15200 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15201 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15202 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15203
15204 /*
15205 * The initial mode needs to be set in order to keep
15206 * the atomic core happy. It wants a valid mode if the
15207 * crtc's enabled, so we do the above call.
15208 *
15209 * At this point some state updated by the connectors
15210 * in their ->detect() callback has not run yet, so
15211 * no recalculation can be done yet.
15212 *
15213 * Even if we could do a recalculation and modeset
15214 * right now it would cause a double modeset if
15215 * fbdev or userspace chooses a different initial mode.
15216 *
15217 * If that happens, someone indicated they wanted a
15218 * mode change, which means it's safe to do a full
15219 * recalculation.
15220 */
15221 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15222 }
15223 }
15210} 15224}
15211 15225
15212/* Scan out the current hw modeset state, 15226/* Scan out the current hw modeset state,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7412caedcf7f..29dd4488dc49 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1659,6 +1659,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1659 if (flush_domains) { 1659 if (flush_domains) {
1660 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1660 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1661 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1661 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1662 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1662 } 1663 }
1663 1664
1664 if (invalidate_domains) { 1665 if (invalidate_domains) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6e6b8db996ef..61b451fbd09e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
347 if (flush_domains) { 347 if (flush_domains) {
348 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 348 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
349 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 349 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
350 flags |= PIPE_CONTROL_FLUSH_ENABLE;
350 } 351 }
351 if (invalidate_domains) { 352 if (invalidate_domains) {
352 flags |= PIPE_CONTROL_TLB_INVALIDATE; 353 flags |= PIPE_CONTROL_TLB_INVALIDATE;
@@ -418,6 +419,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
418 if (flush_domains) { 419 if (flush_domains) {
419 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 420 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
420 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 421 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
422 flags |= PIPE_CONTROL_FLUSH_ENABLE;
421 } 423 }
422 if (invalidate_domains) { 424 if (invalidate_domains) {
423 flags |= PIPE_CONTROL_TLB_INVALIDATE; 425 flags |= PIPE_CONTROL_TLB_INVALIDATE;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2c9981512d27..41be584147b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -227,11 +227,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
227 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 227 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
228 struct nvkm_vma *vma; 228 struct nvkm_vma *vma;
229 229
230 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 230 if (is_power_of_2(nvbo->valid_domains))
231 rep->domain = nvbo->valid_domains;
232 else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
231 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 233 rep->domain = NOUVEAU_GEM_DOMAIN_GART;
232 else 234 else
233 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 235 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
234
235 rep->offset = nvbo->bo.offset; 236 rep->offset = nvbo->bo.offset;
236 if (cli->vm) { 237 if (cli->vm) {
237 vma = nouveau_bo_vma_find(nvbo, cli->vm); 238 vma = nouveau_bo_vma_find(nvbo, cli->vm);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 65adb9c72377..bb292143997e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -237,6 +237,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
237 backlight_update_status(bd); 237 backlight_update_status(bd);
238 238
239 DRM_INFO("radeon atom DIG backlight initialized\n"); 239 DRM_INFO("radeon atom DIG backlight initialized\n");
240 rdev->mode_info.bl_encoder = radeon_encoder;
240 241
241 return; 242 return;
242 243
@@ -1624,9 +1625,14 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
1624 } else 1625 } else
1625 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1626 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1626 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1627 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1627 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 1628 if (rdev->mode_info.bl_encoder) {
1629 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1628 1630
1629 atombios_set_backlight_level(radeon_encoder, dig->backlight_level); 1631 atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
1632 } else {
1633 args.ucAction = ATOM_LCD_BLON;
1634 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1635 }
1630 } 1636 }
1631 break; 1637 break;
1632 case DRM_MODE_DPMS_STANDBY: 1638 case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1712,13 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1706 if (ASIC_IS_DCE4(rdev)) 1712 if (ASIC_IS_DCE4(rdev))
1707 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1713 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1708 } 1714 }
1709 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1715 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1710 atombios_set_backlight_level(radeon_encoder, dig->backlight_level); 1716 if (rdev->mode_info.bl_encoder)
1717 atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
1718 else
1719 atombios_dig_transmitter_setup(encoder,
1720 ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1721 }
1711 if (ext_encoder) 1722 if (ext_encoder)
1712 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1723 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1713 break; 1724 break;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f03b7eb15233..b6cbd816537e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1658,6 +1658,7 @@ struct radeon_pm {
1658 u8 fan_max_rpm; 1658 u8 fan_max_rpm;
1659 /* dpm */ 1659 /* dpm */
1660 bool dpm_enabled; 1660 bool dpm_enabled;
1661 bool sysfs_initialized;
1661 struct radeon_dpm dpm; 1662 struct radeon_dpm dpm;
1662}; 1663};
1663 1664
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ef99917f000d..c6ee80216cf4 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -194,7 +194,6 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
194 radeon_atom_backlight_init(radeon_encoder, connector); 194 radeon_atom_backlight_init(radeon_encoder, connector);
195 else 195 else
196 radeon_legacy_backlight_init(radeon_encoder, connector); 196 radeon_legacy_backlight_init(radeon_encoder, connector);
197 rdev->mode_info.bl_encoder = radeon_encoder;
198 } 197 }
199} 198}
200 199
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 45715307db71..30de43366eae 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -441,6 +441,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
441 backlight_update_status(bd); 441 backlight_update_status(bd);
442 442
443 DRM_INFO("radeon legacy LVDS backlight initialized\n"); 443 DRM_INFO("radeon legacy LVDS backlight initialized\n");
444 rdev->mode_info.bl_encoder = radeon_encoder;
444 445
445 return; 446 return;
446 447
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 44489cce7458..5feee3b4c557 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -717,10 +717,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
717 struct radeon_device *rdev = dev_get_drvdata(dev); 717 struct radeon_device *rdev = dev_get_drvdata(dev);
718 umode_t effective_mode = attr->mode; 718 umode_t effective_mode = attr->mode;
719 719
720 /* Skip limit attributes if DPM is not enabled */ 720 /* Skip attributes if DPM is not enabled */
721 if (rdev->pm.pm_method != PM_METHOD_DPM && 721 if (rdev->pm.pm_method != PM_METHOD_DPM &&
722 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 722 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
723 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 723 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
724 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
725 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
726 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
727 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
724 return 0; 728 return 0;
725 729
726 /* Skip fan attributes if fan is not present */ 730 /* Skip fan attributes if fan is not present */
@@ -1524,19 +1528,23 @@ int radeon_pm_late_init(struct radeon_device *rdev)
1524 1528
1525 if (rdev->pm.pm_method == PM_METHOD_DPM) { 1529 if (rdev->pm.pm_method == PM_METHOD_DPM) {
1526 if (rdev->pm.dpm_enabled) { 1530 if (rdev->pm.dpm_enabled) {
1527 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1531 if (!rdev->pm.sysfs_initialized) {
1528 if (ret) 1532 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1529 DRM_ERROR("failed to create device file for dpm state\n"); 1533 if (ret)
1530 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1534 DRM_ERROR("failed to create device file for dpm state\n");
1531 if (ret) 1535 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1532 DRM_ERROR("failed to create device file for dpm state\n"); 1536 if (ret)
1533 /* XXX: these are noops for dpm but are here for backwards compat */ 1537 DRM_ERROR("failed to create device file for dpm state\n");
1534 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1538 /* XXX: these are noops for dpm but are here for backwards compat */
1535 if (ret) 1539 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1536 DRM_ERROR("failed to create device file for power profile\n"); 1540 if (ret)
1537 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1541 DRM_ERROR("failed to create device file for power profile\n");
1538 if (ret) 1542 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1539 DRM_ERROR("failed to create device file for power method\n"); 1543 if (ret)
1544 DRM_ERROR("failed to create device file for power method\n");
1545 if (!ret)
1546 rdev->pm.sysfs_initialized = true;
1547 }
1540 1548
1541 mutex_lock(&rdev->pm.mutex); 1549 mutex_lock(&rdev->pm.mutex);
1542 ret = radeon_dpm_late_enable(rdev); 1550 ret = radeon_dpm_late_enable(rdev);
@@ -1552,7 +1560,8 @@ int radeon_pm_late_init(struct radeon_device *rdev)
1552 } 1560 }
1553 } 1561 }
1554 } else { 1562 } else {
1555 if (rdev->pm.num_power_states > 1) { 1563 if ((rdev->pm.num_power_states > 1) &&
1564 (!rdev->pm.sysfs_initialized)) {
1556 /* where's the best place to put these? */ 1565 /* where's the best place to put these? */
1557 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1566 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1558 if (ret) 1567 if (ret)
@@ -1560,6 +1569,8 @@ int radeon_pm_late_init(struct radeon_device *rdev)
1560 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1569 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1561 if (ret) 1570 if (ret)
1562 DRM_ERROR("failed to create device file for power method\n"); 1571 DRM_ERROR("failed to create device file for power method\n");
1572 if (!ret)
1573 rdev->pm.sysfs_initialized = true;
1563 } 1574 }
1564 } 1575 }
1565 return ret; 1576 return ret;
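Taken together, the two radeon_pm.c hunks do related things: the hwmon attribute filter now also hides the pwm1* fan controls whenever DPM is not managing the board, and radeon_pm_late_init() creates its sysfs files only the first time it runs, tracked by pm.sysfs_initialized, so a later pass through late init (for example after a resume) does not try to register the same files again. The small user-space sketch below models just those two patterns; the structure, attribute names and printouts are illustrative only, not radeon code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_dev {
	bool dpm_enabled;
	bool sysfs_initialized;	/* mirrors rdev->pm.sysfs_initialized */
};

/* is_visible-style filter: return 0 to hide an attribute, else its mode. */
static int attr_visible(const struct fake_dev *d, const char *name, int mode)
{
	/* Hide thermal limits and fan controls when DPM is not in use. */
	if (!d->dpm_enabled &&
	    (!strcmp(name, "temp1_crit") || !strcmp(name, "pwm1") ||
	     !strcmp(name, "pwm1_enable")))
		return 0;
	return mode;
}

static void late_init(struct fake_dev *d)
{
	if (!d->sysfs_initialized) {
		printf("creating power_dpm_state and friends\n");
		d->sysfs_initialized = true;	/* never create them twice */
	}
	/* ...the rest of late init may run on every call... */
}

int main(void)
{
	struct fake_dev d = { .dpm_enabled = false };

	printf("pwm1 mode: %d\n", attr_visible(&d, "pwm1", 0644));
	late_init(&d);
	late_init(&d);	/* second call is a no-op for sysfs creation */
	return 0;
}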
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 8a76821177a6..6377e8151000 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
415 * 415 *
416 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has 416 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
417 * command buffers left that are not submitted to hardware, Make sure 417 * command buffers left that are not submitted to hardware, Make sure
418 * IRQ handling is turned on. Otherwise, make sure it's turned off. This 418 * IRQ handling is turned on. Otherwise, make sure it's turned off.
419 * function may return -EAGAIN to indicate it should be rerun due to
420 * possibly missed IRQs if IRQs has just been turned on.
421 */ 419 */
422static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) 420static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
423{ 421{
424 int notempty = 0; 422 int notempty;
425 struct vmw_cmdbuf_context *ctx; 423 struct vmw_cmdbuf_context *ctx;
426 int i; 424 int i;
427 425
426retry:
427 notempty = 0;
428 for_each_cmdbuf_ctx(man, i, ctx) 428 for_each_cmdbuf_ctx(man, i, ctx)
429 vmw_cmdbuf_ctx_process(man, ctx, &notempty); 429 vmw_cmdbuf_ctx_process(man, ctx, &notempty);
430 430
@@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
440 man->irq_on = true; 440 man->irq_on = true;
441 441
442 /* Rerun in case we just missed an irq. */ 442 /* Rerun in case we just missed an irq. */
443 return -EAGAIN; 443 goto retry;
444 } 444 }
445
446 return 0;
447} 445}
448 446
449/** 447/**
@@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
468 header->cb_context = cb_context; 466 header->cb_context = cb_context;
469 list_add_tail(&header->list, &man->ctx[cb_context].submitted); 467 list_add_tail(&header->list, &man->ctx[cb_context].submitted);
470 468
471 if (vmw_cmdbuf_man_process(man) == -EAGAIN) 469 vmw_cmdbuf_man_process(man);
472 vmw_cmdbuf_man_process(man);
473} 470}
474 471
475/** 472/**
@@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data)
488 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data; 485 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
489 486
490 spin_lock(&man->lock); 487 spin_lock(&man->lock);
491 if (vmw_cmdbuf_man_process(man) == -EAGAIN) 488 vmw_cmdbuf_man_process(man);
492 (void) vmw_cmdbuf_man_process(man);
493 spin_unlock(&man->lock); 489 spin_unlock(&man->lock);
494} 490}
495 491
@@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
507 struct vmw_cmdbuf_man *man = 503 struct vmw_cmdbuf_man *man =
508 container_of(work, struct vmw_cmdbuf_man, work); 504 container_of(work, struct vmw_cmdbuf_man, work);
509 struct vmw_cmdbuf_header *entry, *next; 505 struct vmw_cmdbuf_header *entry, *next;
506 uint32_t dummy;
510 bool restart = false; 507 bool restart = false;
511 508
512 spin_lock_bh(&man->lock); 509 spin_lock_bh(&man->lock);
@@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
523 if (restart && vmw_cmdbuf_startstop(man, true)) 520 if (restart && vmw_cmdbuf_startstop(man, true))
524 DRM_ERROR("Failed restarting command buffer context 0.\n"); 521 DRM_ERROR("Failed restarting command buffer context 0.\n");
525 522
523 /* Send a new fence in case one was removed */
524 vmw_fifo_send_fence(man->dev_priv, &dummy);
526} 525}
527 526
528/** 527/**
@@ -682,7 +681,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
682 DRM_MM_SEARCH_DEFAULT, 681 DRM_MM_SEARCH_DEFAULT,
683 DRM_MM_CREATE_DEFAULT); 682 DRM_MM_CREATE_DEFAULT);
684 if (ret) { 683 if (ret) {
685 (void) vmw_cmdbuf_man_process(man); 684 vmw_cmdbuf_man_process(man);
686 ret = drm_mm_insert_node_generic(&man->mm, info->node, 685 ret = drm_mm_insert_node_generic(&man->mm, info->node,
687 info->page_size, 0, 0, 686 info->page_size, 0, 0,
688 DRM_MM_SEARCH_DEFAULT, 687 DRM_MM_SEARCH_DEFAULT,
@@ -1168,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1168 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); 1167 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1169 1168
1170 man->has_pool = true; 1169 man->has_pool = true;
1171 man->default_size = default_size; 1170
1171 /*
1172 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1173 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1174 * needs to wait for space and we block on further command
1175 * submissions to be able to free up space.
1176 */
1177 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1172 DRM_INFO("Using command buffers with %s pool.\n", 1178 DRM_INFO("Using command buffers with %s pool.\n",
1173 (man->using_mob) ? "MOB" : "DMA"); 1179 (man->using_mob) ? "MOB" : "DMA");
1174 1180
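The vmw_cmdbuf_man_process() hunks replace the old contract, where the function returned -EAGAIN and every caller had to invoke it a second time, with an internal retry loop; the call sites then shrink to a single plain call. The standalone sketch below models only that control-flow change, with process_once() standing in for the per-context processing and the "IRQ just turned on, maybe missed one" condition.

#include <stdbool.h>
#include <stdio.h>

static int pass;

/* Pretend the first pass enabled IRQs and may have missed one. */
static bool process_once(void)
{
	return pass++ == 0;	/* true means "rerun in case we missed an irq" */
}

static void man_process(void)
{
retry:
	if (process_once()) {
		printf("irq just enabled, rerunning\n");
		goto retry;
	}
}

int main(void)
{
	man_process();		/* callers no longer need to check for -EAGAIN */
	return 0;
}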
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e13c902e8966..796569eeaf1d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -840,6 +840,16 @@ config SENSORS_MAX6697
840 This driver can also be built as a module. If so, the module 840 This driver can also be built as a module. If so, the module
841 will be called max6697. 841 will be called max6697.
842 842
843config SENSORS_MAX31790
844 tristate "Maxim MAX31790 sensor chip"
845 depends on I2C
846 help
847 If you say yes here you get support for 6-Channel PWM-Output
848 Fan RPM Controller.
849
850 This driver can also be built as a module. If so, the module
851 will be called max31790.
852
843config SENSORS_HTU21 853config SENSORS_HTU21
844 tristate "Measurement Specialties HTU21D humidity/temperature sensors" 854 tristate "Measurement Specialties HTU21D humidity/temperature sensors"
845 depends on I2C 855 depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 9e0f3dd2841d..01855ee641d1 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -115,6 +115,7 @@ obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
115obj-$(CONFIG_SENSORS_MAX6642) += max6642.o 115obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
116obj-$(CONFIG_SENSORS_MAX6650) += max6650.o 116obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
117obj-$(CONFIG_SENSORS_MAX6697) += max6697.o 117obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
118obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
118obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o 119obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
119obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o 120obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
120obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o 121obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 1fd46859ed29..d87cae8c635f 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -377,7 +377,7 @@ static int setup_irqs(struct platform_device *pdev)
377 } 377 }
378 378
379 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 379 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
380 abx500_temp_irq_handler, IRQF_NO_SUSPEND, "abx500-temp", pdev); 380 abx500_temp_irq_handler, 0, "abx500-temp", pdev);
381 if (ret < 0) 381 if (ret < 0)
382 dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret); 382 dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
383 383
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 3e03379e7c5d..6a27eb2fed17 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
52MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); 52MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
53 53
54#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 54#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
55#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ 55#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
56#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ 56#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
57#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 57#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
58#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) 58#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
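NUM_REAL_CORES caps how many cores per package coretemp will track, so on parts with more than 32 cores the extra cores previously got no tempX inputs; raising it to 128 lifts that cap. The sketch below is only a hypothetical model of such a fixed-size table and its bound check — the real driver's array layout and error handling differ.

#include <stdio.h>

#define NUM_REAL_CORES	128	/* raised from 32 in this hunk */

struct core_entry { int core_id; int temp_mC; };

static struct core_entry cores[NUM_REAL_CORES];

static int add_core(int core_id)
{
	if (core_id >= NUM_REAL_CORES)
		return -1;	/* with the old limit, cores 32..127 were dropped */
	cores[core_id].core_id = core_id;
	return 0;
}

int main(void)
{
	printf("core 64 -> %s\n", add_core(64) ? "skipped" : "added");
	return 0;
}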
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index e80ee23b62d3..5f7067d7b625 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -26,6 +26,7 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/bitops.h> 27#include <linux/bitops.h>
28#include <asm/processor.h> 28#include <asm/processor.h>
29#include <asm/msr.h>
29 30
30MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor"); 31MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
31MODULE_AUTHOR("Andreas Herrmann <herrmann.der.user@googlemail.com>"); 32MODULE_AUTHOR("Andreas Herrmann <herrmann.der.user@googlemail.com>");
@@ -41,12 +42,21 @@ MODULE_LICENSE("GPL");
41#define REG_TDP_RUNNING_AVERAGE 0xe0 42#define REG_TDP_RUNNING_AVERAGE 0xe0
42#define REG_TDP_LIMIT3 0xe8 43#define REG_TDP_LIMIT3 0xe8
43 44
45#define FAM15H_MIN_NUM_ATTRS 2
46#define FAM15H_NUM_GROUPS 2
47
48#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
49
44struct fam15h_power_data { 50struct fam15h_power_data {
45 struct pci_dev *pdev; 51 struct pci_dev *pdev;
46 unsigned int tdp_to_watts; 52 unsigned int tdp_to_watts;
47 unsigned int base_tdp; 53 unsigned int base_tdp;
48 unsigned int processor_pwr_watts; 54 unsigned int processor_pwr_watts;
49 unsigned int cpu_pwr_sample_ratio; 55 unsigned int cpu_pwr_sample_ratio;
56 const struct attribute_group *groups[FAM15H_NUM_GROUPS];
57 struct attribute_group group;
58 /* maximum accumulated power of a compute unit */
59 u64 max_cu_acc_power;
50}; 60};
51 61
52static ssize_t show_power(struct device *dev, 62static ssize_t show_power(struct device *dev,
@@ -105,29 +115,36 @@ static ssize_t show_power_crit(struct device *dev,
105} 115}
106static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL); 116static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
107 117
108static umode_t fam15h_power_is_visible(struct kobject *kobj, 118static int fam15h_power_init_attrs(struct pci_dev *pdev,
109 struct attribute *attr, 119 struct fam15h_power_data *data)
110 int index)
111{ 120{
112 /* power1_input is only reported for Fam15h, Models 00h-0fh */ 121 int n = FAM15H_MIN_NUM_ATTRS;
113 if (attr == &dev_attr_power1_input.attr && 122 struct attribute **fam15h_power_attrs;
114 (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf)) 123 struct cpuinfo_x86 *c = &boot_cpu_data;
115 return 0;
116 124
117 return attr->mode; 125 if (c->x86 == 0x15 &&
118} 126 (c->x86_model <= 0xf ||
127 (c->x86_model >= 0x60 && c->x86_model <= 0x6f)))
128 n += 1;
119 129
120static struct attribute *fam15h_power_attrs[] = { 130 fam15h_power_attrs = devm_kcalloc(&pdev->dev, n,
121 &dev_attr_power1_input.attr, 131 sizeof(*fam15h_power_attrs),
122 &dev_attr_power1_crit.attr, 132 GFP_KERNEL);
123 NULL
124};
125 133
126static const struct attribute_group fam15h_power_group = { 134 if (!fam15h_power_attrs)
127 .attrs = fam15h_power_attrs, 135 return -ENOMEM;
128 .is_visible = fam15h_power_is_visible, 136
129}; 137 n = 0;
130__ATTRIBUTE_GROUPS(fam15h_power); 138 fam15h_power_attrs[n++] = &dev_attr_power1_crit.attr;
139 if (c->x86 == 0x15 &&
140 (c->x86_model <= 0xf ||
141 (c->x86_model >= 0x60 && c->x86_model <= 0x6f)))
142 fam15h_power_attrs[n++] = &dev_attr_power1_input.attr;
143
144 data->group.attrs = fam15h_power_attrs;
145
146 return 0;
147}
131 148
132static bool should_load_on_this_node(struct pci_dev *f4) 149static bool should_load_on_this_node(struct pci_dev *f4)
133{ 150{
@@ -186,11 +203,12 @@ static int fam15h_power_resume(struct pci_dev *pdev)
186#define fam15h_power_resume NULL 203#define fam15h_power_resume NULL
187#endif 204#endif
188 205
189static void fam15h_power_init_data(struct pci_dev *f4, 206static int fam15h_power_init_data(struct pci_dev *f4,
190 struct fam15h_power_data *data) 207 struct fam15h_power_data *data)
191{ 208{
192 u32 val, eax, ebx, ecx, edx; 209 u32 val, eax, ebx, ecx, edx;
193 u64 tmp; 210 u64 tmp;
211 int ret;
194 212
195 pci_read_config_dword(f4, REG_PROCESSOR_TDP, &val); 213 pci_read_config_dword(f4, REG_PROCESSOR_TDP, &val);
196 data->base_tdp = val >> 16; 214 data->base_tdp = val >> 16;
@@ -211,11 +229,15 @@ static void fam15h_power_init_data(struct pci_dev *f4,
211 /* convert to microWatt */ 229 /* convert to microWatt */
212 data->processor_pwr_watts = (tmp * 15625) >> 10; 230 data->processor_pwr_watts = (tmp * 15625) >> 10;
213 231
232 ret = fam15h_power_init_attrs(f4, data);
233 if (ret)
234 return ret;
235
214 cpuid(0x80000007, &eax, &ebx, &ecx, &edx); 236 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
215 237
216 /* CPUID Fn8000_0007:EDX[12] indicates to support accumulated power */ 238 /* CPUID Fn8000_0007:EDX[12] indicates to support accumulated power */
217 if (!(edx & BIT(12))) 239 if (!(edx & BIT(12)))
218 return; 240 return 0;
219 241
220 /* 242 /*
221 * determine the ratio of the compute unit power accumulator 243 * determine the ratio of the compute unit power accumulator
@@ -223,14 +245,24 @@ static void fam15h_power_init_data(struct pci_dev *f4,
223 * Fn8000_0007:ECX 245 * Fn8000_0007:ECX
224 */ 246 */
225 data->cpu_pwr_sample_ratio = ecx; 247 data->cpu_pwr_sample_ratio = ecx;
248
249 if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
250 pr_err("Failed to read max compute unit power accumulator MSR\n");
251 return -ENODEV;
252 }
253
254 data->max_cu_acc_power = tmp;
255
256 return 0;
226} 257}
227 258
228static int fam15h_power_probe(struct pci_dev *pdev, 259static int fam15h_power_probe(struct pci_dev *pdev,
229 const struct pci_device_id *id) 260 const struct pci_device_id *id)
230{ 261{
231 struct fam15h_power_data *data; 262 struct fam15h_power_data *data;
232 struct device *dev = &pdev->dev; 263 struct device *dev = &pdev->dev;
233 struct device *hwmon_dev; 264 struct device *hwmon_dev;
265 int ret;
234 266
235 /* 267 /*
236 * though we ignore every other northbridge, we still have to 268 * though we ignore every other northbridge, we still have to
@@ -246,12 +278,17 @@ static int fam15h_power_probe(struct pci_dev *pdev,
246 if (!data) 278 if (!data)
247 return -ENOMEM; 279 return -ENOMEM;
248 280
249 fam15h_power_init_data(pdev, data); 281 ret = fam15h_power_init_data(pdev, data);
282 if (ret)
283 return ret;
284
250 data->pdev = pdev; 285 data->pdev = pdev;
251 286
287 data->groups[0] = &data->group;
288
252 hwmon_dev = devm_hwmon_device_register_with_groups(dev, "fam15h_power", 289 hwmon_dev = devm_hwmon_device_register_with_groups(dev, "fam15h_power",
253 data, 290 data,
254 fam15h_power_groups); 291 &data->groups[0]);
255 return PTR_ERR_OR_ZERO(hwmon_dev); 292 return PTR_ERR_OR_ZERO(hwmon_dev);
256} 293}
257 294
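The fam15h_power hunks swap the static attribute array plus is_visible() callback for an attribute list built at probe time: only CPUs where the feature applies get power1_input, and the array is NULL-terminated as attribute groups require. The user-space sketch below models that allocation pattern; the family/model test mirrors the hunk, while the strings and calloc() usage are purely illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool has_power1_input(unsigned int family, unsigned int model)
{
	return family == 0x15 &&
	       (model <= 0xf || (model >= 0x60 && model <= 0x6f));
}

int main(void)
{
	unsigned int family = 0x15, model = 0x60;
	int n = 2;			/* power1_crit + NULL terminator */
	const char **attrs;
	int i = 0;

	if (has_power1_input(family, model))
		n++;

	attrs = calloc(n, sizeof(*attrs));
	if (!attrs)
		return 1;

	attrs[i++] = "power1_crit";
	if (has_power1_input(family, model))
		attrs[i++] = "power1_input";
	/* attrs[i] stays NULL: the terminator the group code looks for */

	for (i = 0; attrs[i]; i++)
		printf("%s\n", attrs[i]);
	free(attrs);
	return 0;
}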
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 4255514b2c72..55b5a8ff1cfe 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -474,11 +474,18 @@ static const struct platform_device_id opal_sensor_driver_ids[] = {
474}; 474};
475MODULE_DEVICE_TABLE(platform, opal_sensor_driver_ids); 475MODULE_DEVICE_TABLE(platform, opal_sensor_driver_ids);
476 476
477static const struct of_device_id opal_sensor_match[] = {
478 { .compatible = "ibm,opal-sensor" },
479 { },
480};
481MODULE_DEVICE_TABLE(of, opal_sensor_match);
482
477static struct platform_driver ibmpowernv_driver = { 483static struct platform_driver ibmpowernv_driver = {
478 .probe = ibmpowernv_probe, 484 .probe = ibmpowernv_probe,
479 .id_table = opal_sensor_driver_ids, 485 .id_table = opal_sensor_driver_ids,
480 .driver = { 486 .driver = {
481 .name = DRVNAME, 487 .name = DRVNAME,
488 .of_match_table = opal_sensor_match,
482 }, 489 },
483}; 490};
484 491
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 4d2815079fc2..b24f1d3045f0 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -37,6 +37,7 @@
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/util_macros.h> 39#include <linux/util_macros.h>
40#include <linux/regmap.h>
40 41
41#include <linux/platform_data/ina2xx.h> 42#include <linux/platform_data/ina2xx.h>
42 43
@@ -84,6 +85,11 @@
84 */ 85 */
85#define INA226_TOTAL_CONV_TIME_DEFAULT 2200 86#define INA226_TOTAL_CONV_TIME_DEFAULT 2200
86 87
88static struct regmap_config ina2xx_regmap_config = {
89 .reg_bits = 8,
90 .val_bits = 16,
91};
92
87enum ina2xx_ids { ina219, ina226 }; 93enum ina2xx_ids { ina219, ina226 };
88 94
89struct ina2xx_config { 95struct ina2xx_config {
@@ -97,20 +103,13 @@ struct ina2xx_config {
97}; 103};
98 104
99struct ina2xx_data { 105struct ina2xx_data {
100 struct i2c_client *client;
101 const struct ina2xx_config *config; 106 const struct ina2xx_config *config;
102 107
103 long rshunt; 108 long rshunt;
104 u16 curr_config; 109 struct mutex config_lock;
105 110 struct regmap *regmap;
106 struct mutex update_lock;
107 bool valid;
108 unsigned long last_updated;
109 int update_interval; /* in jiffies */
110 111
111 int kind;
112 const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS]; 112 const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
113 u16 regs[INA2XX_MAX_REGISTERS];
114}; 113};
115 114
116static const struct ina2xx_config ina2xx_config[] = { 115static const struct ina2xx_config ina2xx_config[] = {
@@ -153,7 +152,11 @@ static int ina226_reg_to_interval(u16 config)
153 return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000); 152 return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
154} 153}
155 154
156static u16 ina226_interval_to_reg(int interval, u16 config) 155/*
156 * Return the new, shifted AVG field value of CONFIG register,
157 * to use with regmap_update_bits
158 */
159static u16 ina226_interval_to_reg(int interval)
157{ 160{
158 int avg, avg_bits; 161 int avg, avg_bits;
159 162
@@ -162,15 +165,7 @@ static u16 ina226_interval_to_reg(int interval, u16 config)
162 avg_bits = find_closest(avg, ina226_avg_tab, 165 avg_bits = find_closest(avg, ina226_avg_tab,
163 ARRAY_SIZE(ina226_avg_tab)); 166 ARRAY_SIZE(ina226_avg_tab));
164 167
165 return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits); 168 return INA226_SHIFT_AVG(avg_bits);
166}
167
168static void ina226_set_update_interval(struct ina2xx_data *data)
169{
170 int ms;
171
172 ms = ina226_reg_to_interval(data->curr_config);
173 data->update_interval = msecs_to_jiffies(ms);
174} 169}
175 170
176static int ina2xx_calibrate(struct ina2xx_data *data) 171static int ina2xx_calibrate(struct ina2xx_data *data)
@@ -178,8 +173,7 @@ static int ina2xx_calibrate(struct ina2xx_data *data)
178 u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, 173 u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
179 data->rshunt); 174 data->rshunt);
180 175
181 return i2c_smbus_write_word_swapped(data->client, 176 return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
182 INA2XX_CALIBRATION, val);
183} 177}
184 178
185/* 179/*
@@ -187,12 +181,8 @@ static int ina2xx_calibrate(struct ina2xx_data *data)
187 */ 181 */
188static int ina2xx_init(struct ina2xx_data *data) 182static int ina2xx_init(struct ina2xx_data *data)
189{ 183{
190 struct i2c_client *client = data->client; 184 int ret = regmap_write(data->regmap, INA2XX_CONFIG,
191 int ret; 185 data->config->config_default);
192
193 /* device configuration */
194 ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
195 data->curr_config);
196 if (ret < 0) 186 if (ret < 0)
197 return ret; 187 return ret;
198 188
@@ -203,47 +193,52 @@ static int ina2xx_init(struct ina2xx_data *data)
203 return ina2xx_calibrate(data); 193 return ina2xx_calibrate(data);
204} 194}
205 195
206static int ina2xx_do_update(struct device *dev) 196static int ina2xx_read_reg(struct device *dev, int reg, unsigned int *regval)
207{ 197{
208 struct ina2xx_data *data = dev_get_drvdata(dev); 198 struct ina2xx_data *data = dev_get_drvdata(dev);
209 struct i2c_client *client = data->client; 199 int ret, retry;
210 int i, rv, retry;
211 200
212 dev_dbg(&client->dev, "Starting ina2xx update\n"); 201 dev_dbg(dev, "Starting register %d read\n", reg);
213 202
214 for (retry = 5; retry; retry--) { 203 for (retry = 5; retry; retry--) {
215 /* Read all registers */ 204
216 for (i = 0; i < data->config->registers; i++) { 205 ret = regmap_read(data->regmap, reg, regval);
217 rv = i2c_smbus_read_word_swapped(client, i); 206 if (ret < 0)
218 if (rv < 0) 207 return ret;
219 return rv; 208
220 data->regs[i] = rv; 209 dev_dbg(dev, "read %d, val = 0x%04x\n", reg, *regval);
221 }
222 210
223 /* 211 /*
224 * If the current value in the calibration register is 0, the 212 * If the current value in the calibration register is 0, the
225 * power and current registers will also remain at 0. In case 213 * power and current registers will also remain at 0. In case
226 * the chip has been reset let's check the calibration 214 * the chip has been reset let's check the calibration
227 * register and reinitialize if needed. 215 * register and reinitialize if needed.
216 * We do that extra read of the calibration register if there
217 * is some hint of a chip reset.
228 */ 218 */
229 if (data->regs[INA2XX_CALIBRATION] == 0) { 219 if (*regval == 0) {
230 dev_warn(dev, "chip not calibrated, reinitializing\n"); 220 unsigned int cal;
231 221
232 rv = ina2xx_init(data); 222 ret = regmap_read(data->regmap, INA2XX_CALIBRATION,
233 if (rv < 0) 223 &cal);
234 return rv; 224 if (ret < 0)
235 225 return ret;
236 /* 226
237 * Let's make sure the power and current registers 227 if (cal == 0) {
238 * have been updated before trying again. 228 dev_warn(dev, "chip not calibrated, reinitializing\n");
239 */ 229
240 msleep(INA2XX_MAX_DELAY); 230 ret = ina2xx_init(data);
241 continue; 231 if (ret < 0)
232 return ret;
233 /*
234 * Let's make sure the power and current
235 * registers have been updated before trying
236 * again.
237 */
238 msleep(INA2XX_MAX_DELAY);
239 continue;
240 }
242 } 241 }
243
244 data->last_updated = jiffies;
245 data->valid = 1;
246
247 return 0; 242 return 0;
248 } 243 }
249 244
@@ -256,51 +251,31 @@ static int ina2xx_do_update(struct device *dev)
256 return -ENODEV; 251 return -ENODEV;
257} 252}
258 253
259static struct ina2xx_data *ina2xx_update_device(struct device *dev) 254static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
260{ 255 unsigned int regval)
261 struct ina2xx_data *data = dev_get_drvdata(dev);
262 struct ina2xx_data *ret = data;
263 unsigned long after;
264 int rv;
265
266 mutex_lock(&data->update_lock);
267
268 after = data->last_updated + data->update_interval;
269 if (time_after(jiffies, after) || !data->valid) {
270 rv = ina2xx_do_update(dev);
271 if (rv < 0)
272 ret = ERR_PTR(rv);
273 }
274
275 mutex_unlock(&data->update_lock);
276 return ret;
277}
278
279static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
280{ 256{
281 int val; 257 int val;
282 258
283 switch (reg) { 259 switch (reg) {
284 case INA2XX_SHUNT_VOLTAGE: 260 case INA2XX_SHUNT_VOLTAGE:
285 /* signed register */ 261 /* signed register */
286 val = DIV_ROUND_CLOSEST((s16)data->regs[reg], 262 val = DIV_ROUND_CLOSEST((s16)regval, data->config->shunt_div);
287 data->config->shunt_div);
288 break; 263 break;
289 case INA2XX_BUS_VOLTAGE: 264 case INA2XX_BUS_VOLTAGE:
290 val = (data->regs[reg] >> data->config->bus_voltage_shift) 265 val = (regval >> data->config->bus_voltage_shift)
291 * data->config->bus_voltage_lsb; 266 * data->config->bus_voltage_lsb;
292 val = DIV_ROUND_CLOSEST(val, 1000); 267 val = DIV_ROUND_CLOSEST(val, 1000);
293 break; 268 break;
294 case INA2XX_POWER: 269 case INA2XX_POWER:
295 val = data->regs[reg] * data->config->power_lsb; 270 val = regval * data->config->power_lsb;
296 break; 271 break;
297 case INA2XX_CURRENT: 272 case INA2XX_CURRENT:
298 /* signed register, LSB=1mA (selected), in mA */ 273 /* signed register, LSB=1mA (selected), in mA */
299 val = (s16)data->regs[reg]; 274 val = (s16)regval;
300 break; 275 break;
301 case INA2XX_CALIBRATION: 276 case INA2XX_CALIBRATION:
302 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, 277 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
303 data->regs[reg]); 278 regval);
304 break; 279 break;
305 default: 280 default:
306 /* programmer goofed */ 281 /* programmer goofed */
@@ -316,25 +291,25 @@ static ssize_t ina2xx_show_value(struct device *dev,
316 struct device_attribute *da, char *buf) 291 struct device_attribute *da, char *buf)
317{ 292{
318 struct sensor_device_attribute *attr = to_sensor_dev_attr(da); 293 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
319 struct ina2xx_data *data = ina2xx_update_device(dev); 294 struct ina2xx_data *data = dev_get_drvdata(dev);
295 unsigned int regval;
296
297 int err = ina2xx_read_reg(dev, attr->index, &regval);
320 298
321 if (IS_ERR(data)) 299 if (err < 0)
322 return PTR_ERR(data); 300 return err;
323 301
324 return snprintf(buf, PAGE_SIZE, "%d\n", 302 return snprintf(buf, PAGE_SIZE, "%d\n",
325 ina2xx_get_value(data, attr->index)); 303 ina2xx_get_value(data, attr->index, regval));
326} 304}
327 305
328static ssize_t ina2xx_set_shunt(struct device *dev, 306static ssize_t ina2xx_set_shunt(struct device *dev,
329 struct device_attribute *da, 307 struct device_attribute *da,
330 const char *buf, size_t count) 308 const char *buf, size_t count)
331{ 309{
332 struct ina2xx_data *data = ina2xx_update_device(dev);
333 unsigned long val; 310 unsigned long val;
334 int status; 311 int status;
335 312 struct ina2xx_data *data = dev_get_drvdata(dev);
336 if (IS_ERR(data))
337 return PTR_ERR(data);
338 313
339 status = kstrtoul(buf, 10, &val); 314 status = kstrtoul(buf, 10, &val);
340 if (status < 0) 315 if (status < 0)
@@ -345,10 +320,10 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
345 val > data->config->calibration_factor) 320 val > data->config->calibration_factor)
346 return -EINVAL; 321 return -EINVAL;
347 322
348 mutex_lock(&data->update_lock); 323 mutex_lock(&data->config_lock);
349 data->rshunt = val; 324 data->rshunt = val;
350 status = ina2xx_calibrate(data); 325 status = ina2xx_calibrate(data);
351 mutex_unlock(&data->update_lock); 326 mutex_unlock(&data->config_lock);
352 if (status < 0) 327 if (status < 0)
353 return status; 328 return status;
354 329
@@ -370,17 +345,9 @@ static ssize_t ina226_set_interval(struct device *dev,
370 if (val > INT_MAX || val == 0) 345 if (val > INT_MAX || val == 0)
371 return -EINVAL; 346 return -EINVAL;
372 347
373 mutex_lock(&data->update_lock); 348 status = regmap_update_bits(data->regmap, INA2XX_CONFIG,
374 data->curr_config = ina226_interval_to_reg(val, 349 INA226_AVG_RD_MASK,
375 data->regs[INA2XX_CONFIG]); 350 ina226_interval_to_reg(val));
376 status = i2c_smbus_write_word_swapped(data->client,
377 INA2XX_CONFIG,
378 data->curr_config);
379
380 ina226_set_update_interval(data);
381 /* Make sure the next access re-reads all registers. */
382 data->valid = 0;
383 mutex_unlock(&data->update_lock);
384 if (status < 0) 351 if (status < 0)
385 return status; 352 return status;
386 353
@@ -390,18 +357,15 @@ static ssize_t ina226_set_interval(struct device *dev,
390static ssize_t ina226_show_interval(struct device *dev, 357static ssize_t ina226_show_interval(struct device *dev,
391 struct device_attribute *da, char *buf) 358 struct device_attribute *da, char *buf)
392{ 359{
393 struct ina2xx_data *data = ina2xx_update_device(dev); 360 struct ina2xx_data *data = dev_get_drvdata(dev);
361 int status;
362 unsigned int regval;
394 363
395 if (IS_ERR(data)) 364 status = regmap_read(data->regmap, INA2XX_CONFIG, &regval);
396 return PTR_ERR(data); 365 if (status)
366 return status;
397 367
398 /* 368 return snprintf(buf, PAGE_SIZE, "%d\n", ina226_reg_to_interval(regval));
399 * We don't use data->update_interval here as we want to display
400 * the actual interval used by the chip and jiffies_to_msecs()
401 * doesn't seem to be accurate enough.
402 */
403 return snprintf(buf, PAGE_SIZE, "%d\n",
404 ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
405} 369}
406 370
407/* shunt voltage */ 371/* shunt voltage */
@@ -455,60 +419,51 @@ static const struct attribute_group ina226_group = {
455static int ina2xx_probe(struct i2c_client *client, 419static int ina2xx_probe(struct i2c_client *client,
456 const struct i2c_device_id *id) 420 const struct i2c_device_id *id)
457{ 421{
458 struct i2c_adapter *adapter = client->adapter;
459 struct ina2xx_platform_data *pdata;
460 struct device *dev = &client->dev; 422 struct device *dev = &client->dev;
461 struct ina2xx_data *data; 423 struct ina2xx_data *data;
462 struct device *hwmon_dev; 424 struct device *hwmon_dev;
463 u32 val; 425 u32 val;
464 int ret, group = 0; 426 int ret, group = 0;
465 427
466 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
467 return -ENODEV;
468
469 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 428 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
470 if (!data) 429 if (!data)
471 return -ENOMEM; 430 return -ENOMEM;
472 431
473 if (dev_get_platdata(dev)) {
474 pdata = dev_get_platdata(dev);
475 data->rshunt = pdata->shunt_uohms;
476 } else if (!of_property_read_u32(dev->of_node,
477 "shunt-resistor", &val)) {
478 data->rshunt = val;
479 } else {
480 data->rshunt = INA2XX_RSHUNT_DEFAULT;
481 }
482
483 /* set the device type */ 432 /* set the device type */
484 data->kind = id->driver_data; 433 data->config = &ina2xx_config[id->driver_data];
485 data->config = &ina2xx_config[data->kind];
486 data->curr_config = data->config->config_default;
487 data->client = client;
488 434
489 /* 435 if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
490 * Ina226 has a variable update_interval. For ina219 we 436 struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
491 * use a constant value. 437
492 */ 438 if (pdata)
493 if (data->kind == ina226) 439 val = pdata->shunt_uohms;
494 ina226_set_update_interval(data); 440 else
495 else 441 val = INA2XX_RSHUNT_DEFAULT;
496 data->update_interval = HZ / INA2XX_CONVERSION_RATE; 442 }
497 443
498 if (data->rshunt <= 0 || 444 if (val <= 0 || val > data->config->calibration_factor)
499 data->rshunt > data->config->calibration_factor)
500 return -ENODEV; 445 return -ENODEV;
501 446
447 data->rshunt = val;
448
449 ina2xx_regmap_config.max_register = data->config->registers;
450
451 data->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
452 if (IS_ERR(data->regmap)) {
453 dev_err(dev, "failed to allocate register map\n");
454 return PTR_ERR(data->regmap);
455 }
456
502 ret = ina2xx_init(data); 457 ret = ina2xx_init(data);
503 if (ret < 0) { 458 if (ret < 0) {
504 dev_err(dev, "error configuring the device: %d\n", ret); 459 dev_err(dev, "error configuring the device: %d\n", ret);
505 return -ENODEV; 460 return -ENODEV;
506 } 461 }
507 462
508 mutex_init(&data->update_lock); 463 mutex_init(&data->config_lock);
509 464
510 data->groups[group++] = &ina2xx_group; 465 data->groups[group++] = &ina2xx_group;
511 if (data->kind == ina226) 466 if (id->driver_data == ina226)
512 data->groups[group++] = &ina226_group; 467 data->groups[group++] = &ina226_group;
513 468
514 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 469 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
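Beyond the regmap conversion, the ina2xx hunks rework how update_interval maps to the INA226 AVG field: one complete measurement takes roughly 2.2 ms (INA226_TOTAL_CONV_TIME_DEFAULT above), so a requested interval is turned into an averaging count and snapped to the nearest value the chip supports. The standalone sketch below walks that arithmetic; the averaging table holds the INA226's AVG choices, and the hand-rolled rounding and closest-match loops stand in for the kernel's DIV_ROUND_CLOSEST()/find_closest().

#include <stdio.h>
#include <stdlib.h>

static const int avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
#define TOTAL_CONV_TIME_US 2200

static int interval_to_avg_bits(int interval_ms)
{
	int avg = (interval_ms * 1000 + TOTAL_CONV_TIME_US / 2) / TOTAL_CONV_TIME_US;
	int best = 0, i;

	for (i = 1; i < 8; i++)		/* pick the closest supported average */
		if (abs(avg_tab[i] - avg) < abs(avg_tab[best] - avg))
			best = i;
	return best;			/* value for the AVG field, before shifting */
}

static int avg_bits_to_interval(int bits)
{
	return (avg_tab[bits] * TOTAL_CONV_TIME_US + 500) / 1000;	/* in ms */
}

int main(void)
{
	int bits = interval_to_avg_bits(500);	/* ask for ~500 ms updates */

	printf("AVG field %d -> actual interval %d ms\n",
	       bits, avg_bits_to_interval(bits));
	return 0;
}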
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e4e57bbafb10..0addc84ba948 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -57,6 +57,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
57 tmp175, 57 tmp175,
58 tmp275, 58 tmp275,
59 tmp75, 59 tmp75,
60 tmp75c,
60}; 61};
61 62
62/* Addresses scanned */ 63/* Addresses scanned */
@@ -280,6 +281,11 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
280 data->resolution = 12; 281 data->resolution = 12;
281 data->sample_time = HZ / 2; 282 data->sample_time = HZ / 2;
282 break; 283 break;
284 case tmp75c:
285 clr_mask |= 1 << 5; /* not one-shot mode */
286 data->resolution = 12;
287 data->sample_time = HZ / 4;
288 break;
283 } 289 }
284 290
285 /* configure as specified */ 291 /* configure as specified */
@@ -343,6 +349,7 @@ static const struct i2c_device_id lm75_ids[] = {
343 { "tmp175", tmp175, }, 349 { "tmp175", tmp175, },
344 { "tmp275", tmp275, }, 350 { "tmp275", tmp275, },
345 { "tmp75", tmp75, }, 351 { "tmp75", tmp75, },
352 { "tmp75c", tmp75c, },
346 { /* LIST END */ } 353 { /* LIST END */ }
347}; 354};
348MODULE_DEVICE_TABLE(i2c, lm75_ids); 355MODULE_DEVICE_TABLE(i2c, lm75_ids);
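For the new tmp75c entry, 12-bit resolution means the left-justified temperature register changes in steps of 1/16 degC. The small standalone program below mirrors the driver's register-to-millidegree conversion, shown here outside the kernel for one sample value.

#include <stdio.h>
#include <stdint.h>

static long reg_to_mc(int16_t reg, unsigned int resolution)
{
	/* 12-bit: (reg >> 4) steps of 62.5 mdegC each */
	return ((long)(reg >> (16 - resolution)) * 1000) >> (resolution - 8);
}

int main(void)
{
	/* 0x1940 is +25.25 degC at 12-bit resolution */
	printf("%ld mC\n", reg_to_mc(0x1940, 12));
	return 0;
}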
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
new file mode 100644
index 000000000000..69c0ac80a946
--- /dev/null
+++ b/drivers/hwmon/max31790.c
@@ -0,0 +1,603 @@
1/*
2 * max31790.c - Part of lm_sensors, Linux kernel modules for hardware
3 * monitoring.
4 *
5 * (C) 2015 by Il Han <corone.il.han@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/err.h>
19#include <linux/hwmon.h>
20#include <linux/hwmon-sysfs.h>
21#include <linux/i2c.h>
22#include <linux/init.h>
23#include <linux/jiffies.h>
24#include <linux/module.h>
25#include <linux/slab.h>
26
27/* MAX31790 registers */
28#define MAX31790_REG_GLOBAL_CONFIG 0x00
29#define MAX31790_REG_FAN_CONFIG(ch) (0x02 + (ch))
30#define MAX31790_REG_FAN_DYNAMICS(ch) (0x08 + (ch))
31#define MAX31790_REG_FAN_FAULT_STATUS2 0x10
32#define MAX31790_REG_FAN_FAULT_STATUS1 0x11
33#define MAX31790_REG_TACH_COUNT(ch) (0x18 + (ch) * 2)
34#define MAX31790_REG_PWM_DUTY_CYCLE(ch) (0x30 + (ch) * 2)
35#define MAX31790_REG_PWMOUT(ch) (0x40 + (ch) * 2)
36#define MAX31790_REG_TARGET_COUNT(ch) (0x50 + (ch) * 2)
37
38/* Fan Config register bits */
39#define MAX31790_FAN_CFG_RPM_MODE 0x80
40#define MAX31790_FAN_CFG_TACH_INPUT_EN 0x08
41#define MAX31790_FAN_CFG_TACH_INPUT 0x01
42
43/* Fan Dynamics register bits */
44#define MAX31790_FAN_DYN_SR_SHIFT 5
45#define MAX31790_FAN_DYN_SR_MASK 0xE0
46#define SR_FROM_REG(reg) (((reg) & MAX31790_FAN_DYN_SR_MASK) \
47 >> MAX31790_FAN_DYN_SR_SHIFT)
48
49#define FAN_RPM_MIN 120
50#define FAN_RPM_MAX 7864320
51
52#define RPM_FROM_REG(reg, sr) (((reg) >> 4) ? \
53 ((60 * (sr) * 8192) / ((reg) >> 4)) : \
54 FAN_RPM_MAX)
55#define RPM_TO_REG(rpm, sr) ((60 * (sr) * 8192) / ((rpm) * 2))
56
57#define NR_CHANNEL 6
58
59/*
60 * Client data (each client gets its own)
61 */
62struct max31790_data {
63 struct i2c_client *client;
64 struct mutex update_lock;
65 bool valid; /* zero until following fields are valid */
66 unsigned long last_updated; /* in jiffies */
67
68 /* register values */
69 u8 fan_config[NR_CHANNEL];
70 u8 fan_dynamics[NR_CHANNEL];
71 u16 fault_status;
72 u16 tach[NR_CHANNEL * 2];
73 u16 pwm[NR_CHANNEL];
74 u16 target_count[NR_CHANNEL];
75};
76
77static struct max31790_data *max31790_update_device(struct device *dev)
78{
79 struct max31790_data *data = dev_get_drvdata(dev);
80 struct i2c_client *client = data->client;
81 struct max31790_data *ret = data;
82 int i;
83 int rv;
84
85 mutex_lock(&data->update_lock);
86
87 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
88 rv = i2c_smbus_read_byte_data(client,
89 MAX31790_REG_FAN_FAULT_STATUS1);
90 if (rv < 0)
91 goto abort;
92 data->fault_status = rv & 0x3F;
93
94 rv = i2c_smbus_read_byte_data(client,
95 MAX31790_REG_FAN_FAULT_STATUS2);
96 if (rv < 0)
97 goto abort;
98 data->fault_status |= (rv & 0x3F) << 6;
99
100 for (i = 0; i < NR_CHANNEL; i++) {
101 rv = i2c_smbus_read_word_swapped(client,
102 MAX31790_REG_TACH_COUNT(i));
103 if (rv < 0)
104 goto abort;
105 data->tach[i] = rv;
106
107 if (data->fan_config[i]
108 & MAX31790_FAN_CFG_TACH_INPUT) {
109 rv = i2c_smbus_read_word_swapped(client,
110 MAX31790_REG_TACH_COUNT(NR_CHANNEL
111 + i));
112 if (rv < 0)
113 goto abort;
114 data->tach[NR_CHANNEL + i] = rv;
115 } else {
116 rv = i2c_smbus_read_word_swapped(client,
117 MAX31790_REG_PWMOUT(i));
118 if (rv < 0)
119 goto abort;
120 data->pwm[i] = rv;
121
122 rv = i2c_smbus_read_word_swapped(client,
123 MAX31790_REG_TARGET_COUNT(i));
124 if (rv < 0)
125 goto abort;
126 data->target_count[i] = rv;
127 }
128 }
129
130 data->last_updated = jiffies;
131 data->valid = true;
132 }
133 goto done;
134
135abort:
136 data->valid = false;
137 ret = ERR_PTR(rv);
138
139done:
140 mutex_unlock(&data->update_lock);
141
142 return ret;
143}
144
145static const u8 tach_period[8] = { 1, 2, 4, 8, 16, 32, 32, 32 };
146
147static u8 get_tach_period(u8 fan_dynamics)
148{
149 return tach_period[SR_FROM_REG(fan_dynamics)];
150}
151
152static u8 bits_for_tach_period(int rpm)
153{
154 u8 bits;
155
156 if (rpm < 500)
157 bits = 0x0;
158 else if (rpm < 1000)
159 bits = 0x1;
160 else if (rpm < 2000)
161 bits = 0x2;
162 else if (rpm < 4000)
163 bits = 0x3;
164 else if (rpm < 8000)
165 bits = 0x4;
166 else
167 bits = 0x5;
168
169 return bits;
170}
171
172static ssize_t get_fan(struct device *dev,
173 struct device_attribute *devattr, char *buf)
174{
175 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
176 struct max31790_data *data = max31790_update_device(dev);
177 int sr, rpm;
178
179 if (IS_ERR(data))
180 return PTR_ERR(data);
181
182 sr = get_tach_period(data->fan_dynamics[attr->index]);
183 rpm = RPM_FROM_REG(data->tach[attr->index], sr);
184
185 return sprintf(buf, "%d\n", rpm);
186}
187
188static ssize_t get_fan_target(struct device *dev,
189 struct device_attribute *devattr, char *buf)
190{
191 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
192 struct max31790_data *data = max31790_update_device(dev);
193 int sr, rpm;
194
195 if (IS_ERR(data))
196 return PTR_ERR(data);
197
198 sr = get_tach_period(data->fan_dynamics[attr->index]);
199 rpm = RPM_FROM_REG(data->target_count[attr->index], sr);
200
201 return sprintf(buf, "%d\n", rpm);
202}
203
204static ssize_t set_fan_target(struct device *dev,
205 struct device_attribute *devattr,
206 const char *buf, size_t count)
207{
208 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
209 struct max31790_data *data = dev_get_drvdata(dev);
210 struct i2c_client *client = data->client;
211 u8 bits;
212 int sr;
213 int target_count;
214 unsigned long rpm;
215 int err;
216
217 err = kstrtoul(buf, 10, &rpm);
218 if (err)
219 return err;
220
221 mutex_lock(&data->update_lock);
222
223 rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
224 bits = bits_for_tach_period(rpm);
225 data->fan_dynamics[attr->index] =
226 ((data->fan_dynamics[attr->index]
227 & ~MAX31790_FAN_DYN_SR_MASK)
228 | (bits << MAX31790_FAN_DYN_SR_SHIFT));
229 err = i2c_smbus_write_byte_data(client,
230 MAX31790_REG_FAN_DYNAMICS(attr->index),
231 data->fan_dynamics[attr->index]);
232
233 if (err < 0) {
234 mutex_unlock(&data->update_lock);
235 return err;
236 }
237
238 sr = get_tach_period(data->fan_dynamics[attr->index]);
239 target_count = RPM_TO_REG(rpm, sr);
240 target_count = clamp_val(target_count, 0x1, 0x7FF);
241
242 data->target_count[attr->index] = target_count << 5;
243
244 err = i2c_smbus_write_word_swapped(client,
245 MAX31790_REG_TARGET_COUNT(attr->index),
246 data->target_count[attr->index]);
247
248 mutex_unlock(&data->update_lock);
249
250 if (err < 0)
251 return err;
252
253 return count;
254}
255
256static ssize_t get_pwm(struct device *dev,
257 struct device_attribute *devattr, char *buf)
258{
259 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
260 struct max31790_data *data = max31790_update_device(dev);
261 int pwm;
262
263 if (IS_ERR(data))
264 return PTR_ERR(data);
265
266 pwm = data->pwm[attr->index] >> 8;
267
268 return sprintf(buf, "%d\n", pwm);
269}
270
271static ssize_t set_pwm(struct device *dev,
272 struct device_attribute *devattr,
273 const char *buf, size_t count)
274{
275 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
276 struct max31790_data *data = dev_get_drvdata(dev);
277 struct i2c_client *client = data->client;
278 unsigned long pwm;
279 int err;
280
281 err = kstrtoul(buf, 10, &pwm);
282 if (err)
283 return err;
284
285 if (pwm > 255)
286 return -EINVAL;
287
288 mutex_lock(&data->update_lock);
289
290 data->pwm[attr->index] = pwm << 8;
291 err = i2c_smbus_write_word_swapped(client,
292 MAX31790_REG_PWMOUT(attr->index),
293 data->pwm[attr->index]);
294
295 mutex_unlock(&data->update_lock);
296
297 if (err < 0)
298 return err;
299
300 return count;
301}
302
303static ssize_t get_pwm_enable(struct device *dev,
304 struct device_attribute *devattr, char *buf)
305{
306 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
307 struct max31790_data *data = max31790_update_device(dev);
308 int mode;
309
310 if (IS_ERR(data))
311 return PTR_ERR(data);
312
313 if (data->fan_config[attr->index] & MAX31790_FAN_CFG_RPM_MODE)
314 mode = 2;
315 else if (data->fan_config[attr->index] & MAX31790_FAN_CFG_TACH_INPUT_EN)
316 mode = 1;
317 else
318 mode = 0;
319
320 return sprintf(buf, "%d\n", mode);
321}
322
323static ssize_t set_pwm_enable(struct device *dev,
324 struct device_attribute *devattr,
325 const char *buf, size_t count)
326{
327 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
328 struct max31790_data *data = dev_get_drvdata(dev);
329 struct i2c_client *client = data->client;
330 unsigned long mode;
331 int err;
332
333 err = kstrtoul(buf, 10, &mode);
334 if (err)
335 return err;
336
337 switch (mode) {
338 case 0:
339 data->fan_config[attr->index] =
340 data->fan_config[attr->index]
341 & ~(MAX31790_FAN_CFG_TACH_INPUT_EN
342 | MAX31790_FAN_CFG_RPM_MODE);
343 break;
344 case 1:
345 data->fan_config[attr->index] =
346 (data->fan_config[attr->index]
347 | MAX31790_FAN_CFG_TACH_INPUT_EN)
348 & ~MAX31790_FAN_CFG_RPM_MODE;
349 break;
350 case 2:
351 data->fan_config[attr->index] =
352 data->fan_config[attr->index]
353 | MAX31790_FAN_CFG_TACH_INPUT_EN
354 | MAX31790_FAN_CFG_RPM_MODE;
355 break;
356 default:
357 return -EINVAL;
358 }
359
360 mutex_lock(&data->update_lock);
361
362 err = i2c_smbus_write_byte_data(client,
363 MAX31790_REG_FAN_CONFIG(attr->index),
364 data->fan_config[attr->index]);
365
366 mutex_unlock(&data->update_lock);
367
368 if (err < 0)
369 return err;
370
371 return count;
372}
373
374static ssize_t get_fan_fault(struct device *dev,
375 struct device_attribute *devattr, char *buf)
376{
377 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
378 struct max31790_data *data = max31790_update_device(dev);
379 int fault;
380
381 if (IS_ERR(data))
382 return PTR_ERR(data);
383
384 fault = !!(data->fault_status & (1 << attr->index));
385
386 return sprintf(buf, "%d\n", fault);
387}
388
389static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
390static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
391static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
392static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
393static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, get_fan, NULL, 4);
394static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, get_fan, NULL, 5);
395
396static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan_fault, NULL, 0);
397static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, get_fan_fault, NULL, 1);
398static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, get_fan_fault, NULL, 2);
399static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, get_fan_fault, NULL, 3);
400static SENSOR_DEVICE_ATTR(fan5_fault, S_IRUGO, get_fan_fault, NULL, 4);
401static SENSOR_DEVICE_ATTR(fan6_fault, S_IRUGO, get_fan_fault, NULL, 5);
402
403static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, get_fan, NULL, 6);
404static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, get_fan, NULL, 7);
405static SENSOR_DEVICE_ATTR(fan9_input, S_IRUGO, get_fan, NULL, 8);
406static SENSOR_DEVICE_ATTR(fan10_input, S_IRUGO, get_fan, NULL, 9);
407static SENSOR_DEVICE_ATTR(fan11_input, S_IRUGO, get_fan, NULL, 10);
408static SENSOR_DEVICE_ATTR(fan12_input, S_IRUGO, get_fan, NULL, 11);
409
410static SENSOR_DEVICE_ATTR(fan7_fault, S_IRUGO, get_fan_fault, NULL, 6);
411static SENSOR_DEVICE_ATTR(fan8_fault, S_IRUGO, get_fan_fault, NULL, 7);
412static SENSOR_DEVICE_ATTR(fan9_fault, S_IRUGO, get_fan_fault, NULL, 8);
413static SENSOR_DEVICE_ATTR(fan10_fault, S_IRUGO, get_fan_fault, NULL, 9);
414static SENSOR_DEVICE_ATTR(fan11_fault, S_IRUGO, get_fan_fault, NULL, 10);
415static SENSOR_DEVICE_ATTR(fan12_fault, S_IRUGO, get_fan_fault, NULL, 11);
416
417static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO,
418 get_fan_target, set_fan_target, 0);
419static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO,
420 get_fan_target, set_fan_target, 1);
421static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO,
422 get_fan_target, set_fan_target, 2);
423static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO,
424 get_fan_target, set_fan_target, 3);
425static SENSOR_DEVICE_ATTR(fan5_target, S_IWUSR | S_IRUGO,
426 get_fan_target, set_fan_target, 4);
427static SENSOR_DEVICE_ATTR(fan6_target, S_IWUSR | S_IRUGO,
428 get_fan_target, set_fan_target, 5);
429
430static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 0);
431static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 1);
432static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 2);
433static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 3);
434static SENSOR_DEVICE_ATTR(pwm5, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 4);
435static SENSOR_DEVICE_ATTR(pwm6, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 5);
436
437static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
438 get_pwm_enable, set_pwm_enable, 0);
439static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
440 get_pwm_enable, set_pwm_enable, 1);
441static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
442 get_pwm_enable, set_pwm_enable, 2);
443static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO,
444 get_pwm_enable, set_pwm_enable, 3);
445static SENSOR_DEVICE_ATTR(pwm5_enable, S_IWUSR | S_IRUGO,
446 get_pwm_enable, set_pwm_enable, 4);
447static SENSOR_DEVICE_ATTR(pwm6_enable, S_IWUSR | S_IRUGO,
448 get_pwm_enable, set_pwm_enable, 5);
449
450static struct attribute *max31790_attrs[] = {
451 &sensor_dev_attr_fan1_input.dev_attr.attr,
452 &sensor_dev_attr_fan2_input.dev_attr.attr,
453 &sensor_dev_attr_fan3_input.dev_attr.attr,
454 &sensor_dev_attr_fan4_input.dev_attr.attr,
455 &sensor_dev_attr_fan5_input.dev_attr.attr,
456 &sensor_dev_attr_fan6_input.dev_attr.attr,
457
458 &sensor_dev_attr_fan1_fault.dev_attr.attr,
459 &sensor_dev_attr_fan2_fault.dev_attr.attr,
460 &sensor_dev_attr_fan3_fault.dev_attr.attr,
461 &sensor_dev_attr_fan4_fault.dev_attr.attr,
462 &sensor_dev_attr_fan5_fault.dev_attr.attr,
463 &sensor_dev_attr_fan6_fault.dev_attr.attr,
464
465 &sensor_dev_attr_fan7_input.dev_attr.attr,
466 &sensor_dev_attr_fan8_input.dev_attr.attr,
467 &sensor_dev_attr_fan9_input.dev_attr.attr,
468 &sensor_dev_attr_fan10_input.dev_attr.attr,
469 &sensor_dev_attr_fan11_input.dev_attr.attr,
470 &sensor_dev_attr_fan12_input.dev_attr.attr,
471
472 &sensor_dev_attr_fan7_fault.dev_attr.attr,
473 &sensor_dev_attr_fan8_fault.dev_attr.attr,
474 &sensor_dev_attr_fan9_fault.dev_attr.attr,
475 &sensor_dev_attr_fan10_fault.dev_attr.attr,
476 &sensor_dev_attr_fan11_fault.dev_attr.attr,
477 &sensor_dev_attr_fan12_fault.dev_attr.attr,
478
479 &sensor_dev_attr_fan1_target.dev_attr.attr,
480 &sensor_dev_attr_fan2_target.dev_attr.attr,
481 &sensor_dev_attr_fan3_target.dev_attr.attr,
482 &sensor_dev_attr_fan4_target.dev_attr.attr,
483 &sensor_dev_attr_fan5_target.dev_attr.attr,
484 &sensor_dev_attr_fan6_target.dev_attr.attr,
485
486 &sensor_dev_attr_pwm1.dev_attr.attr,
487 &sensor_dev_attr_pwm2.dev_attr.attr,
488 &sensor_dev_attr_pwm3.dev_attr.attr,
489 &sensor_dev_attr_pwm4.dev_attr.attr,
490 &sensor_dev_attr_pwm5.dev_attr.attr,
491 &sensor_dev_attr_pwm6.dev_attr.attr,
492
493 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
494 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
495 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
496 &sensor_dev_attr_pwm4_enable.dev_attr.attr,
497 &sensor_dev_attr_pwm5_enable.dev_attr.attr,
498 &sensor_dev_attr_pwm6_enable.dev_attr.attr,
499 NULL
500};
501
502static umode_t max31790_attrs_visible(struct kobject *kobj,
503 struct attribute *a, int n)
504{
505 struct device *dev = container_of(kobj, struct device, kobj);
506 struct max31790_data *data = dev_get_drvdata(dev);
507 struct device_attribute *devattr =
508 container_of(a, struct device_attribute, attr);
509 int index = to_sensor_dev_attr(devattr)->index % NR_CHANNEL;
510 u8 fan_config;
511
512 fan_config = data->fan_config[index];
513
514 if (n >= NR_CHANNEL * 2 && n < NR_CHANNEL * 4 &&
515 !(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
516 return 0;
517 if (n >= NR_CHANNEL * 4 && (fan_config & MAX31790_FAN_CFG_TACH_INPUT))
518 return 0;
519
520 return a->mode;
521}
522
523static const struct attribute_group max31790_group = {
524 .attrs = max31790_attrs,
525 .is_visible = max31790_attrs_visible,
526};
527__ATTRIBUTE_GROUPS(max31790);
528
529static int max31790_init_client(struct i2c_client *client,
530 struct max31790_data *data)
531{
532 int i, rv;
533
534 for (i = 0; i < NR_CHANNEL; i++) {
535 rv = i2c_smbus_read_byte_data(client,
536 MAX31790_REG_FAN_CONFIG(i));
537 if (rv < 0)
538 return rv;
539 data->fan_config[i] = rv;
540
541 rv = i2c_smbus_read_byte_data(client,
542 MAX31790_REG_FAN_DYNAMICS(i));
543 if (rv < 0)
544 return rv;
545 data->fan_dynamics[i] = rv;
546 }
547
548 return 0;
549}
550
551static int max31790_probe(struct i2c_client *client,
552 const struct i2c_device_id *id)
553{
554 struct i2c_adapter *adapter = client->adapter;
555 struct device *dev = &client->dev;
556 struct max31790_data *data;
557 struct device *hwmon_dev;
558 int err;
559
560 if (!i2c_check_functionality(adapter,
561 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
562 return -ENODEV;
563
564 data = devm_kzalloc(dev, sizeof(struct max31790_data), GFP_KERNEL);
565 if (!data)
566 return -ENOMEM;
567
568 data->client = client;
569 mutex_init(&data->update_lock);
570
571 /*
572 * Initialize the max31790 chip
573 */
574 err = max31790_init_client(client, data);
575 if (err)
576 return err;
577
578 hwmon_dev = devm_hwmon_device_register_with_groups(dev,
579 client->name, data, max31790_groups);
580
581 return PTR_ERR_OR_ZERO(hwmon_dev);
582}
583
584static const struct i2c_device_id max31790_id[] = {
585 { "max31790", 0 },
586 { }
587};
588MODULE_DEVICE_TABLE(i2c, max31790_id);
589
590static struct i2c_driver max31790_driver = {
591 .class = I2C_CLASS_HWMON,
592 .probe = max31790_probe,
593 .driver = {
594 .name = "max31790",
595 },
596 .id_table = max31790_id,
597};
598
599module_i2c_driver(max31790_driver);
600
601MODULE_AUTHOR("Il Han <corone.il.han@gmail.com>");
602MODULE_DESCRIPTION("MAX31790 sensor driver");
603MODULE_LICENSE("GPL");
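The tach math in this new driver is compact enough to check by hand: RPM_TO_REG() turns a requested RPM into an 11-bit count for the selected speed range (SR), set_fan_target() stores that count in bits 15:5 of the target register, and RPM_FROM_REG()'s >>4 (rather than >>5) effectively folds the fixed two-pulses-per-revolution divider into the read-back. The standalone program below runs one round trip at 3000 RPM using the constants from the file above.

#include <stdio.h>

static const unsigned char tach_period[8] = { 1, 2, 4, 8, 16, 32, 32, 32 };

static unsigned int rpm_to_count(unsigned int rpm, unsigned int sr)
{
	return (60 * sr * 8192) / (rpm * 2);	/* RPM_TO_REG */
}

static unsigned int reg_to_rpm(unsigned int reg, unsigned int sr)
{
	/* RPM_FROM_REG, with FAN_RPM_MAX returned for a stopped fan */
	return (reg >> 4) ? (60 * sr * 8192) / (reg >> 4) : 7864320;
}

int main(void)
{
	unsigned int sr = tach_period[0x3];	/* 3000 rpm falls in the <4000 bucket */
	unsigned int count = rpm_to_count(3000, sr);
	unsigned int reg = count << 5;		/* count lives in bits 15:5 */

	printf("target count %u, read back as %u rpm\n",
	       count, reg_to_rpm(reg, sr));	/* ~3001 rpm */
	return 0;
}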
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 8b4fa55e46c6..d7ebdf8651f5 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -515,16 +515,24 @@ static const char *const nct6779_temp_label[] = {
515 "PCH_DIM1_TEMP", 515 "PCH_DIM1_TEMP",
516 "PCH_DIM2_TEMP", 516 "PCH_DIM2_TEMP",
517 "PCH_DIM3_TEMP", 517 "PCH_DIM3_TEMP",
518 "BYTE_TEMP" 518 "BYTE_TEMP",
519 "",
520 "",
521 "",
522 "",
523 "Virtual_TEMP"
519}; 524};
520 525
521static const u16 NCT6779_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6779_temp_label) - 1] 526#define NCT6779_NUM_LABELS (ARRAY_SIZE(nct6779_temp_label) - 5)
527#define NCT6791_NUM_LABELS ARRAY_SIZE(nct6779_temp_label)
528
529static const u16 NCT6779_REG_TEMP_ALTERNATE[NCT6791_NUM_LABELS - 1]
522 = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, 530 = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
523 0, 0, 0, 0, 0, 0, 0, 0, 531 0, 0, 0, 0, 0, 0, 0, 0,
524 0, 0x400, 0x401, 0x402, 0x404, 0x405, 0x406, 0x407, 532 0, 0x400, 0x401, 0x402, 0x404, 0x405, 0x406, 0x407,
525 0x408, 0 }; 533 0x408, 0 };
526 534
527static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1] 535static const u16 NCT6779_REG_TEMP_CRIT[NCT6791_NUM_LABELS - 1]
528 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x709, 0x70a }; 536 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x709, 0x70a };
529 537
530/* NCT6791 specific data */ 538/* NCT6791 specific data */
@@ -557,6 +565,76 @@ static const u16 NCT6792_REG_TEMP_MON[] = {
557static const u16 NCT6792_REG_BEEP[NUM_REG_BEEP] = { 565static const u16 NCT6792_REG_BEEP[NUM_REG_BEEP] = {
558 0xb2, 0xb3, 0xb4, 0xb5, 0xbf }; 566 0xb2, 0xb3, 0xb4, 0xb5, 0xbf };
559 567
568static const char *const nct6792_temp_label[] = {
569 "",
570 "SYSTIN",
571 "CPUTIN",
572 "AUXTIN0",
573 "AUXTIN1",
574 "AUXTIN2",
575 "AUXTIN3",
576 "",
577 "SMBUSMASTER 0",
578 "SMBUSMASTER 1",
579 "SMBUSMASTER 2",
580 "SMBUSMASTER 3",
581 "SMBUSMASTER 4",
582 "SMBUSMASTER 5",
583 "SMBUSMASTER 6",
584 "SMBUSMASTER 7",
585 "PECI Agent 0",
586 "PECI Agent 1",
587 "PCH_CHIP_CPU_MAX_TEMP",
588 "PCH_CHIP_TEMP",
589 "PCH_CPU_TEMP",
590 "PCH_MCH_TEMP",
591 "PCH_DIM0_TEMP",
592 "PCH_DIM1_TEMP",
593 "PCH_DIM2_TEMP",
594 "PCH_DIM3_TEMP",
595 "BYTE_TEMP",
596 "PECI Agent 0 Calibration",
597 "PECI Agent 1 Calibration",
598 "",
599 "",
600 "Virtual_TEMP"
601};
602
603static const char *const nct6793_temp_label[] = {
604 "",
605 "SYSTIN",
606 "CPUTIN",
607 "AUXTIN0",
608 "AUXTIN1",
609 "AUXTIN2",
610 "AUXTIN3",
611 "",
612 "SMBUSMASTER 0",
613 "SMBUSMASTER 1",
614 "",
615 "",
616 "",
617 "",
618 "",
619 "",
620 "PECI Agent 0",
621 "PECI Agent 1",
622 "PCH_CHIP_CPU_MAX_TEMP",
623 "PCH_CHIP_TEMP",
624 "PCH_CPU_TEMP",
625 "PCH_MCH_TEMP",
626 "Agent0 Dimm0 ",
627 "Agent0 Dimm1",
628 "Agent1 Dimm0",
629 "Agent1 Dimm1",
630 "BYTE_TEMP0",
631 "BYTE_TEMP1",
632 "PECI Agent 0 Calibration",
633 "PECI Agent 1 Calibration",
634 "",
635 "Virtual_TEMP"
636};
637
560/* NCT6102D/NCT6106D specific data */ 638/* NCT6102D/NCT6106D specific data */
561 639
562#define NCT6106_REG_VBAT 0x318 640#define NCT6106_REG_VBAT 0x318
@@ -3605,7 +3683,7 @@ static int nct6775_probe(struct platform_device *pdev)
3605 data->speed_tolerance_limit = 63; 3683 data->speed_tolerance_limit = 63;
3606 3684
3607 data->temp_label = nct6779_temp_label; 3685 data->temp_label = nct6779_temp_label;
3608 data->temp_label_num = ARRAY_SIZE(nct6779_temp_label); 3686 data->temp_label_num = NCT6779_NUM_LABELS;
3609 3687
3610 data->REG_CONFIG = NCT6775_REG_CONFIG; 3688 data->REG_CONFIG = NCT6775_REG_CONFIG;
3611 data->REG_VBAT = NCT6775_REG_VBAT; 3689 data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3682,8 +3760,19 @@ static int nct6775_probe(struct platform_device *pdev)
3682 data->tolerance_mask = 0x07; 3760 data->tolerance_mask = 0x07;
3683 data->speed_tolerance_limit = 63; 3761 data->speed_tolerance_limit = 63;
3684 3762
3685 data->temp_label = nct6779_temp_label; 3763 switch (data->kind) {
3686 data->temp_label_num = ARRAY_SIZE(nct6779_temp_label); 3764 default:
3765 case nct6791:
3766 data->temp_label = nct6779_temp_label;
3767 break;
3768 case nct6792:
3769 data->temp_label = nct6792_temp_label;
3770 break;
3771 case nct6793:
3772 data->temp_label = nct6793_temp_label;
3773 break;
3774 }
3775 data->temp_label_num = NCT6791_NUM_LABELS;
3687 3776
3688 data->REG_CONFIG = NCT6775_REG_CONFIG; 3777 data->REG_CONFIG = NCT6775_REG_CONFIG;
3689 data->REG_VBAT = NCT6775_REG_VBAT; 3778 data->REG_VBAT = NCT6775_REG_VBAT;
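The new per-chip label tables differ mainly in which temperature sources exist at all; entries left as empty strings mark source numbers the given chip does not expose. The sketch below shows one way such a table is typically consumed — pick the table for the detected chip, skip empty labels — but the skip rule is an assumption for illustration, the tables are abbreviated, and the driver's real visibility logic is more involved.

#include <stdio.h>
#include <string.h>

enum kinds { nct6791, nct6792, nct6793 };

static const char *const nct6792_labels[] = { "", "SYSTIN", "CPUTIN", "AUXTIN0" };
static const char *const nct6793_labels[] = { "", "SYSTIN", "CPUTIN", "" };

int main(void)
{
	enum kinds kind = nct6793;
	const char *const *labels;
	size_t i, n;

	switch (kind) {
	case nct6793:
		labels = nct6793_labels;
		n = sizeof(nct6793_labels) / sizeof(nct6793_labels[0]);
		break;
	case nct6792:
	default:
		labels = nct6792_labels;
		n = sizeof(nct6792_labels) / sizeof(nct6792_labels[0]);
		break;
	}

	for (i = 0; i < n; i++)
		if (strlen(labels[i]))		/* unlabeled sources are absent */
			printf("temp source %zu: %s\n", i, labels[i]);
	return 0;
}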
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 30059c1df2a3..5801227b97ab 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -669,8 +669,6 @@ mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data)
669 struct i2c_msg *msgs = drv_data->msgs; 669 struct i2c_msg *msgs = drv_data->msgs;
670 int num = drv_data->num_msgs; 670 int num = drv_data->num_msgs;
671 671
672 return false;
673
674 if (!drv_data->offload_enabled) 672 if (!drv_data->offload_enabled)
675 return false; 673 return false;
676 674
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index e814a36d9b78..6f8b446be5b0 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -600,7 +600,7 @@ static int i2c_pnx_controller_suspend(struct device *dev)
600{ 600{
601 struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); 601 struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
602 602
603 clk_disable(alg_data->clk); 603 clk_disable_unprepare(alg_data->clk);
604 604
605 return 0; 605 return 0;
606} 606}
@@ -609,7 +609,7 @@ static int i2c_pnx_controller_resume(struct device *dev)
609{ 609{
610 struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); 610 struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
611 611
612 return clk_enable(alg_data->clk); 612 return clk_prepare_enable(alg_data->clk);
613} 613}
614 614
615static SIMPLE_DEV_PM_OPS(i2c_pnx_pm, 615static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
@@ -672,7 +672,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
672 if (IS_ERR(alg_data->ioaddr)) 672 if (IS_ERR(alg_data->ioaddr))
673 return PTR_ERR(alg_data->ioaddr); 673 return PTR_ERR(alg_data->ioaddr);
674 674
675 ret = clk_enable(alg_data->clk); 675 ret = clk_prepare_enable(alg_data->clk);
676 if (ret) 676 if (ret)
677 return ret; 677 return ret;
678 678
@@ -726,7 +726,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
726 return 0; 726 return 0;
727 727
728out_clock: 728out_clock:
729 clk_disable(alg_data->clk); 729 clk_disable_unprepare(alg_data->clk);
730 return ret; 730 return ret;
731} 731}
732 732
@@ -735,7 +735,7 @@ static int i2c_pnx_remove(struct platform_device *pdev)
735 struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); 735 struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
736 736
737 i2c_del_adapter(&alg_data->adapter); 737 i2c_del_adapter(&alg_data->adapter);
738 clk_disable(alg_data->clk); 738 clk_disable_unprepare(alg_data->clk);
739 739
740 return 0; 740 return 0;
741} 741}
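
The i2c-pnx changes above replace clk_enable()/clk_disable() with clk_prepare_enable()/clk_disable_unprepare(). Under the common clock framework a clock has to be prepared (which may sleep) before it can be enabled (which must not), and the combined helpers perform both steps from process context, which is what probe, suspend/resume and remove run in. A minimal sketch of the pattern, with hypothetical names, not taken from this driver:

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical helper: gate an IP block's clock around one operation. */
static int example_run_with_clock(struct device *dev, struct clk *clk,
				  int (*op)(struct device *dev))
{
	int ret;

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;

	ret = op(dev);

	clk_disable_unprepare(clk);	/* undo both steps */
	return ret;
}

The same conversion appears later in this series for the lpc32xx touchscreen driver.
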
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index ff30f8806880..fb9311110424 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -149,8 +149,6 @@
149#define ST_ACCEL_4_BDU_MASK 0x40 149#define ST_ACCEL_4_BDU_MASK 0x40
150#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21 150#define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21
151#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04 151#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04
152#define ST_ACCEL_4_IG1_EN_ADDR 0x21
153#define ST_ACCEL_4_IG1_EN_MASK 0x08
154#define ST_ACCEL_4_MULTIREAD_BIT true 152#define ST_ACCEL_4_MULTIREAD_BIT true
155 153
156/* CUSTOM VALUES FOR SENSOR 5 */ 154/* CUSTOM VALUES FOR SENSOR 5 */
@@ -489,10 +487,6 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
489 .drdy_irq = { 487 .drdy_irq = {
490 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR, 488 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
491 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK, 489 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
492 .ig1 = {
493 .en_addr = ST_ACCEL_4_IG1_EN_ADDR,
494 .en_mask = ST_ACCEL_4_IG1_EN_MASK,
495 },
496 }, 490 },
497 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT, 491 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
498 .bootime = 2, /* guess */ 492 .bootime = 2, /* guess */
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index ebe415f10640..0c74869a540a 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -45,13 +45,18 @@
45#include <linux/types.h> 45#include <linux/types.h>
46#include <linux/gfp.h> 46#include <linux/gfp.h>
47#include <linux/err.h> 47#include <linux/err.h>
48#include <linux/regulator/consumer.h>
48 49
49#include <linux/iio/iio.h> 50#include <linux/iio/iio.h>
50 51
52#define TWL4030_USB_SEL_MADC_MCPC (1<<3)
53#define TWL4030_USB_CARKIT_ANA_CTRL 0xBB
54
51/** 55/**
52 * struct twl4030_madc_data - a container for madc info 56 * struct twl4030_madc_data - a container for madc info
53 * @dev: Pointer to device structure for madc 57 * @dev: Pointer to device structure for madc
54 * @lock: Mutex protecting this data structure 58 * @lock: Mutex protecting this data structure
59 * @regulator: Pointer to bias regulator for madc
55 * @requests: Array of request struct corresponding to SW1, SW2 and RT 60 * @requests: Array of request struct corresponding to SW1, SW2 and RT
56 * @use_second_irq: IRQ selection (main or co-processor) 61 * @use_second_irq: IRQ selection (main or co-processor)
57 * @imr: Interrupt mask register of MADC 62 * @imr: Interrupt mask register of MADC
@@ -60,6 +65,7 @@
60struct twl4030_madc_data { 65struct twl4030_madc_data {
61 struct device *dev; 66 struct device *dev;
62 struct mutex lock; /* mutex protecting this data structure */ 67 struct mutex lock; /* mutex protecting this data structure */
68 struct regulator *usb3v1;
63 struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS]; 69 struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
64 bool use_second_irq; 70 bool use_second_irq;
65 u8 imr; 71 u8 imr;
@@ -841,6 +847,32 @@ static int twl4030_madc_probe(struct platform_device *pdev)
841 } 847 }
842 twl4030_madc = madc; 848 twl4030_madc = madc;
843 849
850 /* Configure MADC[3:6] */
851 ret = twl_i2c_read_u8(TWL_MODULE_USB, &regval,
852 TWL4030_USB_CARKIT_ANA_CTRL);
853 if (ret) {
854 dev_err(&pdev->dev, "unable to read reg CARKIT_ANA_CTRL 0x%X\n",
855 TWL4030_USB_CARKIT_ANA_CTRL);
856 goto err_i2c;
857 }
858 regval |= TWL4030_USB_SEL_MADC_MCPC;
859 ret = twl_i2c_write_u8(TWL_MODULE_USB, regval,
860 TWL4030_USB_CARKIT_ANA_CTRL);
861 if (ret) {
862 dev_err(&pdev->dev, "unable to write reg CARKIT_ANA_CTRL 0x%X\n",
863 TWL4030_USB_CARKIT_ANA_CTRL);
864 goto err_i2c;
865 }
866
867 /* Enable 3v1 bias regulator for MADC[3:6] */
868 madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
869 if (IS_ERR(madc->usb3v1))
870 return -ENODEV;
871
872 ret = regulator_enable(madc->usb3v1);
873 if (ret)
874 dev_err(madc->dev, "could not enable 3v1 bias regulator\n");
875
844 ret = iio_device_register(iio_dev); 876 ret = iio_device_register(iio_dev);
845 if (ret) { 877 if (ret) {
846 dev_err(&pdev->dev, "could not register iio device\n"); 878 dev_err(&pdev->dev, "could not register iio device\n");
@@ -866,6 +898,8 @@ static int twl4030_madc_remove(struct platform_device *pdev)
866 twl4030_madc_set_current_generator(madc, 0, 0); 898 twl4030_madc_set_current_generator(madc, 0, 0);
867 twl4030_madc_set_power(madc, 0); 899 twl4030_madc_set_power(madc, 0);
868 900
901 regulator_disable(madc->usb3v1);
902
869 return 0; 903 return 0;
870} 904}
871 905
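
The twl4030-madc probe above switches the TWL4030 USB block into MCPC mode and enables the 3.1 V bias regulator that feeds MADC channels 3..6; remove() disables it again. The regulator part follows the usual consumer pattern: look the supply up by name with devm_regulator_get() and balance every regulator_enable() with a regulator_disable(). A minimal sketch with an assumed supply name ("vexample"), not taken from the driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical: enable a named bias supply and hand it back to the caller. */
static int example_enable_bias(struct device *dev, struct regulator **out)
{
	struct regulator *reg;
	int ret;

	reg = devm_regulator_get(dev, "vexample");	/* assumed supply name */
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);	/* balance with regulator_disable() */
	if (ret)
		return ret;

	*out = reg;
	return 0;
}
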
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 8f66c67ff0df..87471ef37198 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -508,12 +508,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
508 memset(&gid_attr, 0, sizeof(gid_attr)); 508 memset(&gid_attr, 0, sizeof(gid_attr));
509 gid_attr.ndev = ndev; 509 gid_attr.ndev = ndev;
510 510
511 mutex_lock(&table->lock);
511 ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT); 512 ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);
512 513
513 /* Coudn't find default GID location */ 514 /* Coudn't find default GID location */
514 WARN_ON(ix < 0); 515 WARN_ON(ix < 0);
515 516
516 mutex_lock(&table->lock);
517 if (!__ib_cache_gid_get(ib_dev, port, ix, 517 if (!__ib_cache_gid_get(ib_dev, port, ix,
518 &current_gid, &current_gid_attr) && 518 &current_gid, &current_gid_attr) &&
519 mode == IB_CACHE_GID_DEFAULT_MODE_SET && 519 mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
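
The cache.c hunk above widens the critical section: table->lock is now taken before find_gid() rather than after it, so the lookup of the default GID slot and the subsequent update are covered by the same lock and cannot race with a concurrent table change. A minimal sketch of the look-up-and-update-under-one-lock shape, with hypothetical types:

#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical table: finding the slot and writing it must not be split. */
struct example_table {
	struct mutex lock;
	int default_index;
	u64 entries[16];
};

static void example_set_default(struct example_table *t, u64 val)
{
	mutex_lock(&t->lock);
	/* Both the slot lookup and the store happen under the same lock. */
	t->entries[t->default_index] = val;
	mutex_unlock(&t->lock);
}
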
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ea4db9c1d44f..4f918b929eca 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -835,6 +835,11 @@ retest:
835 case IB_CM_SIDR_REQ_RCVD: 835 case IB_CM_SIDR_REQ_RCVD:
836 spin_unlock_irq(&cm_id_priv->lock); 836 spin_unlock_irq(&cm_id_priv->lock);
837 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); 837 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
838 spin_lock_irq(&cm.lock);
839 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
840 rb_erase(&cm_id_priv->sidr_id_node,
841 &cm.remote_sidr_table);
842 spin_unlock_irq(&cm.lock);
838 break; 843 break;
839 case IB_CM_REQ_SENT: 844 case IB_CM_REQ_SENT:
840 case IB_CM_MRA_REQ_RCVD: 845 case IB_CM_MRA_REQ_RCVD:
@@ -3172,7 +3177,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3172 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3177 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3173 3178
3174 spin_lock_irqsave(&cm.lock, flags); 3179 spin_lock_irqsave(&cm.lock, flags);
3175 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3180 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3181 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3182 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3183 }
3176 spin_unlock_irqrestore(&cm.lock, flags); 3184 spin_unlock_irqrestore(&cm.lock, flags);
3177 return 0; 3185 return 0;
3178 3186
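
Both cm.c hunks above protect the SIDR id against double removal: rb_erase() is only called when RB_EMPTY_NODE() says the node is still linked, and ib_send_cm_sidr_rep() additionally marks the node with RB_CLEAR_NODE() after erasing it, so a destroy path racing with it sees an unlinked node. A minimal sketch of the idiom, with hypothetical names (the node must have been initialised with RB_CLEAR_NODE() for the emptiness test to be meaningful):

#include <linux/rbtree.h>
#include <linux/spinlock.h>

static struct rb_root example_tree = RB_ROOT;
static DEFINE_SPINLOCK(example_lock);

/* Remove a node at most once, even when two paths race to do it. */
static void example_remove(struct rb_node *node)
{
	spin_lock(&example_lock);
	if (!RB_EMPTY_NODE(node)) {	/* still linked into the tree? */
		rb_erase(node, &example_tree);
		RB_CLEAR_NODE(node);	/* later callers see it as unlinked */
	}
	spin_unlock(&example_lock);
}
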
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 59a2dafc8c57..36b12d560e17 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1067,14 +1067,14 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
1067 sizeof(req->local_gid)); 1067 sizeof(req->local_gid));
1068 req->has_gid = true; 1068 req->has_gid = true;
1069 req->service_id = req_param->primary_path->service_id; 1069 req->service_id = req_param->primary_path->service_id;
1070 req->pkey = req_param->bth_pkey; 1070 req->pkey = be16_to_cpu(req_param->primary_path->pkey);
1071 break; 1071 break;
1072 case IB_CM_SIDR_REQ_RECEIVED: 1072 case IB_CM_SIDR_REQ_RECEIVED:
1073 req->device = sidr_param->listen_id->device; 1073 req->device = sidr_param->listen_id->device;
1074 req->port = sidr_param->port; 1074 req->port = sidr_param->port;
1075 req->has_gid = false; 1075 req->has_gid = false;
1076 req->service_id = sidr_param->service_id; 1076 req->service_id = sidr_param->service_id;
1077 req->pkey = sidr_param->bth_pkey; 1077 req->pkey = sidr_param->pkey;
1078 break; 1078 break;
1079 default: 1079 default:
1080 return -EINVAL; 1080 return -EINVAL;
@@ -1324,7 +1324,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
1324 bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id), 1324 bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
1325 cma_port_from_service_id(req.service_id)); 1325 cma_port_from_service_id(req.service_id));
1326 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); 1326 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
1327 if (IS_ERR(id_priv)) { 1327 if (IS_ERR(id_priv) && *net_dev) {
1328 dev_put(*net_dev); 1328 dev_put(*net_dev);
1329 *net_dev = NULL; 1329 *net_dev = NULL;
1330 } 1330 }
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 6b24cba1e474..178f98482e13 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -250,25 +250,44 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
250 u8 port, struct net_device *ndev) 250 u8 port, struct net_device *ndev)
251{ 251{
252 struct in_device *in_dev; 252 struct in_device *in_dev;
253 struct sin_list {
254 struct list_head list;
255 struct sockaddr_in ip;
256 };
257 struct sin_list *sin_iter;
258 struct sin_list *sin_temp;
253 259
260 LIST_HEAD(sin_list);
254 if (ndev->reg_state >= NETREG_UNREGISTERING) 261 if (ndev->reg_state >= NETREG_UNREGISTERING)
255 return; 262 return;
256 263
257 in_dev = in_dev_get(ndev); 264 rcu_read_lock();
258 if (!in_dev) 265 in_dev = __in_dev_get_rcu(ndev);
266 if (!in_dev) {
267 rcu_read_unlock();
259 return; 268 return;
269 }
260 270
261 for_ifa(in_dev) { 271 for_ifa(in_dev) {
262 struct sockaddr_in ip; 272 struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
263 273
264 ip.sin_family = AF_INET; 274 if (!entry) {
265 ip.sin_addr.s_addr = ifa->ifa_address; 275 pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
266 update_gid_ip(GID_ADD, ib_dev, port, ndev, 276 continue;
267 (struct sockaddr *)&ip); 277 }
278 entry->ip.sin_family = AF_INET;
279 entry->ip.sin_addr.s_addr = ifa->ifa_address;
280 list_add_tail(&entry->list, &sin_list);
268 } 281 }
269 endfor_ifa(in_dev); 282 endfor_ifa(in_dev);
283 rcu_read_unlock();
270 284
271 in_dev_put(in_dev); 285 list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
286 update_gid_ip(GID_ADD, ib_dev, port, ndev,
287 (struct sockaddr *)&sin_iter->ip);
288 list_del(&sin_iter->list);
289 kfree(sin_iter);
290 }
272} 291}
273 292
274static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, 293static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
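
enum_netdev_ipv4_ips() above now walks the netdevice's IPv4 addresses under rcu_read_lock() and only copies each address into a private list; update_gid_ip(), which may sleep, runs after the RCU read-side section ends. Collect-then-process is the standard way to call sleeping code for items discovered under RCU. A minimal sketch of the shape, with hypothetical types and a placeholder loop standing in for the RCU-protected walk:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_item {
	struct list_head list;
	u32 value;
};

/* Hypothetical: snapshot values under RCU, then process them sleepably. */
static void example_collect_and_process(void (*process)(u32 value))
{
	struct example_item *it, *tmp;
	LIST_HEAD(snapshot);
	int i;

	rcu_read_lock();
	for (i = 0; i < 4; i++) {	/* placeholder for the RCU-protected walk */
		it = kzalloc(sizeof(*it), GFP_ATOMIC);	/* no sleeping here */
		if (!it)
			continue;
		it->value = i;
		list_add_tail(&it->list, &snapshot);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(it, tmp, &snapshot, list) {
		process(it->value);	/* may sleep now */
		list_del(&it->list);
		kfree(it);
	}
}
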
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index a53fc9b01c69..30467d10df91 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1624,11 +1624,16 @@ static int ucma_open(struct inode *inode, struct file *filp)
1624 if (!file) 1624 if (!file)
1625 return -ENOMEM; 1625 return -ENOMEM;
1626 1626
1627 file->close_wq = create_singlethread_workqueue("ucma_close_id");
1628 if (!file->close_wq) {
1629 kfree(file);
1630 return -ENOMEM;
1631 }
1632
1627 INIT_LIST_HEAD(&file->event_list); 1633 INIT_LIST_HEAD(&file->event_list);
1628 INIT_LIST_HEAD(&file->ctx_list); 1634 INIT_LIST_HEAD(&file->ctx_list);
1629 init_waitqueue_head(&file->poll_wait); 1635 init_waitqueue_head(&file->poll_wait);
1630 mutex_init(&file->mut); 1636 mutex_init(&file->mut);
1631 file->close_wq = create_singlethread_workqueue("ucma_close_id");
1632 1637
1633 filp->private_data = file; 1638 filp->private_data = file;
1634 file->filp = filp; 1639 file->filp = filp;
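
ucma_open() above now creates the close_id workqueue first and bails out with -ENOMEM if that fails; previously the return value of create_singlethread_workqueue() was never checked. The general shape being followed is: perform every allocation that can fail before the new object becomes reachable, and unwind in reverse order on error. A minimal sketch with hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_file {
	struct workqueue_struct *wq;
	/* ...other per-open state... */
};

/* Hypothetical open path: fail before the object is visible anywhere. */
static struct example_file *example_open(void)
{
	struct example_file *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	f->wq = create_singlethread_workqueue("example_wq");
	if (!f->wq) {
		kfree(f);	/* unwind in reverse allocation order */
		return NULL;
	}
	return f;
}
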
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 4d246861d692..41e6cb501e6a 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -100,7 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
100#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 100#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
101#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 101#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
102 6-byte ALPS packet */ 102 6-byte ALPS packet */
103#define ALPS_DELL 0x100 /* device is a Dell laptop */ 103#define ALPS_STICK_BITS 0x100 /* separate stick button bits */
104#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ 104#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
105 105
106static const struct alps_model_info alps_model_data[] = { 106static const struct alps_model_info alps_model_data[] = {
@@ -159,6 +159,43 @@ static const struct alps_protocol_info alps_v8_protocol_data = {
159 ALPS_PROTO_V8, 0x18, 0x18, 0 159 ALPS_PROTO_V8, 0x18, 0x18, 0
160}; 160};
161 161
162/*
163 * Some v2 models report the stick buttons in separate bits
164 */
165static const struct dmi_system_id alps_dmi_has_separate_stick_buttons[] = {
166#if defined(CONFIG_DMI) && defined(CONFIG_X86)
167 {
168 /* Extrapolated from other entries */
169 .matches = {
170 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
171 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D420"),
172 },
173 },
174 {
175 /* Reported-by: Hans de Bruin <jmdebruin@xmsnet.nl> */
176 .matches = {
177 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
178 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D430"),
179 },
180 },
181 {
182 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
183 .matches = {
184 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
185 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D620"),
186 },
187 },
188 {
189 /* Extrapolated from other entries */
190 .matches = {
191 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
192 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D630"),
193 },
194 },
195#endif
196 { }
197};
198
162static void alps_set_abs_params_st(struct alps_data *priv, 199static void alps_set_abs_params_st(struct alps_data *priv,
163 struct input_dev *dev1); 200 struct input_dev *dev1);
164static void alps_set_abs_params_semi_mt(struct alps_data *priv, 201static void alps_set_abs_params_semi_mt(struct alps_data *priv,
@@ -253,9 +290,8 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
253 return; 290 return;
254 } 291 }
255 292
256 /* Dell non interleaved V2 dualpoint has separate stick button bits */ 293 /* Some models have separate stick button bits */
257 if (priv->proto_version == ALPS_PROTO_V2 && 294 if (priv->flags & ALPS_STICK_BITS) {
258 priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
259 left |= packet[0] & 1; 295 left |= packet[0] & 1;
260 right |= packet[0] & 2; 296 right |= packet[0] & 2;
261 middle |= packet[0] & 4; 297 middle |= packet[0] & 4;
@@ -2552,8 +2588,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
2552 priv->byte0 = protocol->byte0; 2588 priv->byte0 = protocol->byte0;
2553 priv->mask0 = protocol->mask0; 2589 priv->mask0 = protocol->mask0;
2554 priv->flags = protocol->flags; 2590 priv->flags = protocol->flags;
2555 if (dmi_name_in_vendors("Dell"))
2556 priv->flags |= ALPS_DELL;
2557 2591
2558 priv->x_max = 2000; 2592 priv->x_max = 2000;
2559 priv->y_max = 1400; 2593 priv->y_max = 1400;
@@ -2568,6 +2602,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
2568 priv->set_abs_params = alps_set_abs_params_st; 2602 priv->set_abs_params = alps_set_abs_params_st;
2569 priv->x_max = 1023; 2603 priv->x_max = 1023;
2570 priv->y_max = 767; 2604 priv->y_max = 767;
2605 if (dmi_check_system(alps_dmi_has_separate_stick_buttons))
2606 priv->flags |= ALPS_STICK_BITS;
2571 break; 2607 break;
2572 2608
2573 case ALPS_PROTO_V3: 2609 case ALPS_PROTO_V3:
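
The ALPS change above narrows the old "any Dell machine" test to a DMI whitelist: ALPS_STICK_BITS is set only when dmi_check_system() matches one of the listed Latitude models. A minimal sketch of the dmi_system_id pattern with made-up vendor and product strings:

#include <linux/dmi.h>

static bool example_quirk;

static const struct dmi_system_id example_quirk_table[] = {
	{
		/* Hypothetical machine; real entries list one model each. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{ }	/* terminating empty entry is required */
};

static void example_apply_quirks(void)
{
	/* dmi_check_system() returns the number of matching entries. */
	if (dmi_check_system(example_quirk_table))
		example_quirk = true;
}
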
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 600dcceff542..deb14c12ae8b 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1006,6 +1006,7 @@ config TOUCHSCREEN_SUN4I
1006config TOUCHSCREEN_SUR40 1006config TOUCHSCREEN_SUR40
1007 tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen" 1007 tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
1008 depends on USB && MEDIA_USB_SUPPORT && HAS_DMA 1008 depends on USB && MEDIA_USB_SUPPORT && HAS_DMA
1009 depends on VIDEO_V4L2
1009 select INPUT_POLLDEV 1010 select INPUT_POLLDEV
1010 select VIDEOBUF2_DMA_SG 1011 select VIDEOBUF2_DMA_SG
1011 help 1012 help
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 24d704cd9f88..7fbb3b0c8571 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -139,14 +139,14 @@ static void lpc32xx_stop_tsc(struct lpc32xx_tsc *tsc)
139 tsc_readl(tsc, LPC32XX_TSC_CON) & 139 tsc_readl(tsc, LPC32XX_TSC_CON) &
140 ~LPC32XX_TSC_ADCCON_AUTO_EN); 140 ~LPC32XX_TSC_ADCCON_AUTO_EN);
141 141
142 clk_disable(tsc->clk); 142 clk_disable_unprepare(tsc->clk);
143} 143}
144 144
145static void lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc) 145static void lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc)
146{ 146{
147 u32 tmp; 147 u32 tmp;
148 148
149 clk_enable(tsc->clk); 149 clk_prepare_enable(tsc->clk);
150 150
151 tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP; 151 tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP;
152 152
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 08d2775887f7..532e2a211fe1 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1974,8 +1974,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
1974static void clear_dte_entry(u16 devid) 1974static void clear_dte_entry(u16 devid)
1975{ 1975{
1976 /* remove entry from the device table seen by the hardware */ 1976 /* remove entry from the device table seen by the hardware */
1977 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; 1977 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1978 amd_iommu_dev_table[devid].data[1] = 0; 1978 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
1979 1979
1980 amd_iommu_apply_erratum_63(devid); 1980 amd_iommu_apply_erratum_63(devid);
1981} 1981}
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f65908841be0..c9b64722f623 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -295,6 +295,7 @@
295#define IOMMU_PTE_IR (1ULL << 61) 295#define IOMMU_PTE_IR (1ULL << 61)
296#define IOMMU_PTE_IW (1ULL << 62) 296#define IOMMU_PTE_IW (1ULL << 62)
297 297
298#define DTE_FLAG_MASK (0x3ffULL << 32)
298#define DTE_FLAG_IOTLB (0x01UL << 32) 299#define DTE_FLAG_IOTLB (0x01UL << 32)
299#define DTE_FLAG_GV (0x01ULL << 55) 300#define DTE_FLAG_GV (0x01ULL << 55)
300#define DTE_GLX_SHIFT (56) 301#define DTE_GLX_SHIFT (56)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 1131664b918b..d21d4edf7236 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -516,6 +516,13 @@ static void do_fault(struct work_struct *work)
516 goto out; 516 goto out;
517 } 517 }
518 518
519 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
520 /* handle_mm_fault would BUG_ON() */
521 up_read(&mm->mmap_sem);
522 handle_fault_error(fault);
523 goto out;
524 }
525
519 ret = handle_mm_fault(mm, vma, address, write); 526 ret = handle_mm_fault(mm, vma, address, write);
520 if (ret & VM_FAULT_ERROR) { 527 if (ret & VM_FAULT_ERROR) {
521 /* failed to service fault */ 528 /* failed to service fault */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 35365f046923..d65cf42399e8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2115,15 +2115,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2115 return -ENOMEM; 2115 return -ENOMEM;
2116 /* It is large page*/ 2116 /* It is large page*/
2117 if (largepage_lvl > 1) { 2117 if (largepage_lvl > 1) {
2118 unsigned long nr_superpages, end_pfn;
2119
2118 pteval |= DMA_PTE_LARGE_PAGE; 2120 pteval |= DMA_PTE_LARGE_PAGE;
2119 lvl_pages = lvl_to_nr_pages(largepage_lvl); 2121 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2122
2123 nr_superpages = sg_res / lvl_pages;
2124 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2125
2120 /* 2126 /*
2121 * Ensure that old small page tables are 2127 * Ensure that old small page tables are
2122 * removed to make room for superpage, 2128 * removed to make room for superpage(s).
2123 * if they exist.
2124 */ 2129 */
2125 dma_pte_free_pagetable(domain, iov_pfn, 2130 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
2126 iov_pfn + lvl_pages - 1);
2127 } else { 2131 } else {
2128 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; 2132 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2129 } 2133 }
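
The intel-iommu hunk above fixes the cleanup range for superpage mappings: the old code freed only the page tables under the first superpage (lvl_pages), even when the scatterlist chunk (sg_res) covered several. As a worked example, assuming 4 KiB base pages and 2 MiB superpages, lvl_pages is 512; for an 8 MiB chunk sg_res is 2048, so nr_superpages = 2048 / 512 = 4 and the freed range now spans 4 * 512 = 2048 page frames instead of 512. A sketch of the end-pfn computation as used above:

#include <linux/types.h>

/* End of the page-frame range that must be cleared before mapping
 * superpages: every superpage covered by this scatterlist chunk. */
static unsigned long example_superpage_end_pfn(unsigned long iov_pfn,
					       unsigned long sg_res,
					       unsigned long lvl_pages)
{
	unsigned long nr_superpages = sg_res / lvl_pages;

	return iov_pfn + nr_superpages * lvl_pages - 1;
}
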
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 27b52c8729cd..4d7294e5d982 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -123,6 +123,7 @@ config RENESAS_INTC_IRQPIN
123 123
124config RENESAS_IRQC 124config RENESAS_IRQC
125 bool 125 bool
126 select GENERIC_IRQ_CHIP
126 select IRQ_DOMAIN 127 select IRQ_DOMAIN
127 128
128config ST_IRQCHIP 129config ST_IRQCHIP
@@ -187,3 +188,8 @@ config IMX_GPCV2
187 select IRQ_DOMAIN 188 select IRQ_DOMAIN
188 help 189 help
189 Enables the wakeup IRQs for IMX platforms with GPCv2 block 190 Enables the wakeup IRQs for IMX platforms with GPCv2 block
191
192config IRQ_MXS
193 def_bool y if MACH_ASM9260 || ARCH_MXS
194 select IRQ_DOMAIN
195 select STMP_DEVICE
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index bb3048f00e64..177f78f6e6d6 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
6obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o 6obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
7obj-$(CONFIG_ARCH_MMP) += irq-mmp.o 7obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
8obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o 8obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
9obj-$(CONFIG_ARCH_MXS) += irq-mxs.o 9obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
10obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o 10obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
11obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o 11obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
12obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o 12obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
diff --git a/drivers/irqchip/alphascale_asm9260-icoll.h b/drivers/irqchip/alphascale_asm9260-icoll.h
new file mode 100644
index 000000000000..5cec108ee204
--- /dev/null
+++ b/drivers/irqchip/alphascale_asm9260-icoll.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef _ALPHASCALE_ASM9260_ICOLL_H
11#define _ALPHASCALE_ASM9260_ICOLL_H
12
13#define ASM9260_NUM_IRQS 64
14/*
15 * this device provides 4 offsets for each register:
16 * 0x0 - plain read write mode
17 * 0x4 - set mode, OR logic.
18 * 0x8 - clr mode, XOR logic.
19 * 0xc - toggle mode.
20 */
21
22#define ASM9260_HW_ICOLL_VECTOR 0x0000
23/*
24 * bits 31:2
25 * This register presents the vector address for the interrupt currently
26 * active on the CPU IRQ input. Writing to this register notifies the
27 * interrupt collector that the interrupt service routine for the current
28 * interrupt has been entered.
29 * The exception trap should have a LDPC instruction from this address:
30 * LDPC ASM9260_HW_ICOLL_VECTOR_ADDR; IRQ exception at 0xffff0018
31 */
32
33/*
34 * The Interrupt Collector Level Acknowledge Register is used by software to
35 * indicate the completion of an interrupt on a specific level.
36 * This register is written at the very end of an interrupt service routine. If
37 * nesting is used then the CPU irq must be turned on before writing to this
38 * register to avoid a race condition in the CPU interrupt hardware.
39 */
40#define ASM9260_HW_ICOLL_LEVELACK 0x0010
41#define ASM9260_BM_LEVELn(nr) BIT(nr)
42
43#define ASM9260_HW_ICOLL_CTRL 0x0020
44/*
45 * ASM9260_BM_CTRL_SFTRST and ASM9260_BM_CTRL_CLKGATE are not available on
46 * asm9260.
47 */
48#define ASM9260_BM_CTRL_SFTRST BIT(31)
49#define ASM9260_BM_CTRL_CLKGATE BIT(30)
50/* disable interrupt level nesting */
51#define ASM9260_BM_CTRL_NO_NESTING BIT(19)
52/*
53 * Set this bit to one enable the RISC32-style read side effect associated with
54 * the vector address register. In this mode, interrupt in-service is signaled
55 * by the read of the ASM9260_HW_ICOLL_VECTOR register to acquire the interrupt
56 * vector address. Set this bit to zero for normal operation, in which the ISR
57 * signals in-service explicitly by means of a write to the
58 * ASM9260_HW_ICOLL_VECTOR register.
59 * 0 - Must Write to Vector register to go in-service.
60 * 1 - Go in-service as a read side effect
61 */
62#define ASM9260_BM_CTRL_ARM_RSE_MODE BIT(18)
63#define ASM9260_BM_CTRL_IRQ_ENABLE BIT(16)
64
65#define ASM9260_HW_ICOLL_STAT_OFFSET 0x0030
66/*
67 * bits 5:0
68 * Vector number of current interrupt. Multiply by 4 and add to vector base
69 * address to obtain the value in ASM9260_HW_ICOLL_VECTOR.
70 */
71
72/*
73 * RAW0 and RAW1 provides a read-only view of the raw interrupt request lines
74 * coming from various parts of the chip. Its purpose is to improve diagnostic
75 * observability.
76 */
77#define ASM9260_HW_ICOLL_RAW0 0x0040
78#define ASM9260_HW_ICOLL_RAW1 0x0050
79
80#define ASM9260_HW_ICOLL_INTERRUPT0 0x0060
81#define ASM9260_HW_ICOLL_INTERRUPTn(n) (0x0060 + ((n) >> 2) * 0x10)
82/*
83 * WARNING: Modifying the priority of an enabled interrupt may result in
84 * undefined behavior.
85 */
86#define ASM9260_BM_INT_PRIORITY_MASK 0x3
87#define ASM9260_BM_INT_ENABLE BIT(2)
88#define ASM9260_BM_INT_SOFTIRQ BIT(3)
89
90#define ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n) (((n) & 0x3) << 3)
91#define ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n) (1 << (2 + \
92 ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n)))
93
94#define ASM9260_HW_ICOLL_VBASE 0x0160
95/*
96 * bits 31:2
97 * This bitfield holds the upper 30 bits of the base address of the vector
98 * table.
99 */
100
101#define ASM9260_HW_ICOLL_CLEAR0 0x01d0
102#define ASM9260_HW_ICOLL_CLEAR1 0x01e0
103#define ASM9260_HW_ICOLL_CLEARn(n) (((n >> 5) * 0x10) \
104 + SET_REG)
105#define ASM9260_BM_CLEAR_BIT(n) BIT(n & 0x1f)
106
107/* Scratchpad */
108#define ASM9260_HW_ICOLL_UNDEF_VECTOR 0x01f0
109#endif
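
The new header above documents the MXS-style register layout in which every register has set/clear/toggle aliases at offsets +0x4/+0x8/+0xc, so single bits can be changed without a read-modify-write sequence. A minimal sketch of how a driver typically uses such aliases (offsets taken from the comment above, helper names hypothetical):

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_REG_SET	0x4	/* write 1s here to set bits */
#define EXAMPLE_REG_CLR	0x8	/* write 1s here to clear bits */

/* Change bits in a shared register without a read-modify-write. */
static void example_set_bits(void __iomem *reg, u32 bits)
{
	writel(bits, reg + EXAMPLE_REG_SET);
}

static void example_clear_bits(void __iomem *reg, u32 bits)
{
	writel(bits, reg + EXAMPLE_REG_CLR);
}
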
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index cd7d3bc78e34..ead15be2d20a 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -144,7 +144,7 @@ static int combiner_irq_domain_xlate(struct irq_domain *d,
144 unsigned long *out_hwirq, 144 unsigned long *out_hwirq,
145 unsigned int *out_type) 145 unsigned int *out_type)
146{ 146{
147 if (d->of_node != controller) 147 if (irq_domain_get_of_node(d) != controller)
148 return -EINVAL; 148 return -EINVAL;
149 149
150 if (intsize < 2) 150 if (intsize < 2)
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 655cb967a1f2..389318a3be82 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -317,6 +317,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
317 handle_level_irq); 317 handle_level_irq);
318 } 318 }
319 irq_set_probe(virq); 319 irq_set_probe(virq);
320 irq_clear_status_flags(virq, IRQ_NOAUTOEN);
320 321
321 return 0; 322 return 0;
322} 323}
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 63cd031b2c28..b12a5d58546f 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -114,7 +114,7 @@ int aic_common_irq_domain_xlate(struct irq_domain *d,
114 114
115static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) 115static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
116{ 116{
117 struct device_node *node = domain->of_node; 117 struct device_node *node = irq_domain_get_of_node(domain);
118 struct irq_chip_generic *gc; 118 struct irq_chip_generic *gc;
119 struct aic_chip_data *aic; 119 struct aic_chip_data *aic;
120 struct property *prop; 120 struct property *prop;
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index f6d680485bee..62bb840c613f 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -70,16 +70,15 @@ static struct irq_domain *aic5_domain;
70static asmlinkage void __exception_irq_entry 70static asmlinkage void __exception_irq_entry
71aic5_handle(struct pt_regs *regs) 71aic5_handle(struct pt_regs *regs)
72{ 72{
73 struct irq_domain_chip_generic *dgc = aic5_domain->gc; 73 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
74 struct irq_chip_generic *gc = dgc->gc[0];
75 u32 irqnr; 74 u32 irqnr;
76 u32 irqstat; 75 u32 irqstat;
77 76
78 irqnr = irq_reg_readl(gc, AT91_AIC5_IVR); 77 irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR);
79 irqstat = irq_reg_readl(gc, AT91_AIC5_ISR); 78 irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR);
80 79
81 if (!irqstat) 80 if (!irqstat)
82 irq_reg_writel(gc, 0, AT91_AIC5_EOICR); 81 irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
83 else 82 else
84 handle_domain_irq(aic5_domain, irqnr, regs); 83 handle_domain_irq(aic5_domain, irqnr, regs);
85} 84}
@@ -87,8 +86,7 @@ aic5_handle(struct pt_regs *regs)
87static void aic5_mask(struct irq_data *d) 86static void aic5_mask(struct irq_data *d)
88{ 87{
89 struct irq_domain *domain = d->domain; 88 struct irq_domain *domain = d->domain;
90 struct irq_domain_chip_generic *dgc = domain->gc; 89 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
91 struct irq_chip_generic *bgc = dgc->gc[0];
92 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 90 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
93 91
94 /* 92 /*
@@ -105,8 +103,7 @@ static void aic5_mask(struct irq_data *d)
105static void aic5_unmask(struct irq_data *d) 103static void aic5_unmask(struct irq_data *d)
106{ 104{
107 struct irq_domain *domain = d->domain; 105 struct irq_domain *domain = d->domain;
108 struct irq_domain_chip_generic *dgc = domain->gc; 106 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
109 struct irq_chip_generic *bgc = dgc->gc[0];
110 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 107 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
111 108
112 /* 109 /*
@@ -123,14 +120,13 @@ static void aic5_unmask(struct irq_data *d)
123static int aic5_retrigger(struct irq_data *d) 120static int aic5_retrigger(struct irq_data *d)
124{ 121{
125 struct irq_domain *domain = d->domain; 122 struct irq_domain *domain = d->domain;
126 struct irq_domain_chip_generic *dgc = domain->gc; 123 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
127 struct irq_chip_generic *gc = dgc->gc[0];
128 124
129 /* Enable interrupt on AIC5 */ 125 /* Enable interrupt on AIC5 */
130 irq_gc_lock(gc); 126 irq_gc_lock(bgc);
131 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 127 irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
132 irq_reg_writel(gc, 1, AT91_AIC5_ISCR); 128 irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
133 irq_gc_unlock(gc); 129 irq_gc_unlock(bgc);
134 130
135 return 0; 131 return 0;
136} 132}
@@ -138,18 +134,17 @@ static int aic5_retrigger(struct irq_data *d)
138static int aic5_set_type(struct irq_data *d, unsigned type) 134static int aic5_set_type(struct irq_data *d, unsigned type)
139{ 135{
140 struct irq_domain *domain = d->domain; 136 struct irq_domain *domain = d->domain;
141 struct irq_domain_chip_generic *dgc = domain->gc; 137 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
142 struct irq_chip_generic *gc = dgc->gc[0];
143 unsigned int smr; 138 unsigned int smr;
144 int ret; 139 int ret;
145 140
146 irq_gc_lock(gc); 141 irq_gc_lock(bgc);
147 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 142 irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
148 smr = irq_reg_readl(gc, AT91_AIC5_SMR); 143 smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
149 ret = aic_common_set_type(d, type, &smr); 144 ret = aic_common_set_type(d, type, &smr);
150 if (!ret) 145 if (!ret)
151 irq_reg_writel(gc, smr, AT91_AIC5_SMR); 146 irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
152 irq_gc_unlock(gc); 147 irq_gc_unlock(bgc);
153 148
154 return ret; 149 return ret;
155} 150}
@@ -159,7 +154,7 @@ static void aic5_suspend(struct irq_data *d)
159{ 154{
160 struct irq_domain *domain = d->domain; 155 struct irq_domain *domain = d->domain;
161 struct irq_domain_chip_generic *dgc = domain->gc; 156 struct irq_domain_chip_generic *dgc = domain->gc;
162 struct irq_chip_generic *bgc = dgc->gc[0]; 157 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
163 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 158 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
164 int i; 159 int i;
165 u32 mask; 160 u32 mask;
@@ -183,7 +178,7 @@ static void aic5_resume(struct irq_data *d)
183{ 178{
184 struct irq_domain *domain = d->domain; 179 struct irq_domain *domain = d->domain;
185 struct irq_domain_chip_generic *dgc = domain->gc; 180 struct irq_domain_chip_generic *dgc = domain->gc;
186 struct irq_chip_generic *bgc = dgc->gc[0]; 181 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
187 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 182 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
188 int i; 183 int i;
189 u32 mask; 184 u32 mask;
@@ -207,7 +202,7 @@ static void aic5_pm_shutdown(struct irq_data *d)
207{ 202{
208 struct irq_domain *domain = d->domain; 203 struct irq_domain *domain = d->domain;
209 struct irq_domain_chip_generic *dgc = domain->gc; 204 struct irq_domain_chip_generic *dgc = domain->gc;
210 struct irq_chip_generic *bgc = dgc->gc[0]; 205 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
211 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 206 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
212 int i; 207 int i;
213 208
@@ -262,12 +257,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
262 irq_hw_number_t *out_hwirq, 257 irq_hw_number_t *out_hwirq,
263 unsigned int *out_type) 258 unsigned int *out_type)
264{ 259{
265 struct irq_domain_chip_generic *dgc = d->gc; 260 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
266 struct irq_chip_generic *gc;
267 unsigned smr; 261 unsigned smr;
268 int ret; 262 int ret;
269 263
270 if (!dgc) 264 if (!bgc)
271 return -EINVAL; 265 return -EINVAL;
272 266
273 ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize, 267 ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
@@ -275,15 +269,13 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
275 if (ret) 269 if (ret)
276 return ret; 270 return ret;
277 271
278 gc = dgc->gc[0]; 272 irq_gc_lock(bgc);
279 273 irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
280 irq_gc_lock(gc); 274 smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
281 irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR);
282 smr = irq_reg_readl(gc, AT91_AIC5_SMR);
283 ret = aic_common_set_priority(intspec[2], &smr); 275 ret = aic_common_set_priority(intspec[2], &smr);
284 if (!ret) 276 if (!ret)
285 irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR); 277 irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
286 irq_gc_unlock(gc); 278 irq_gc_unlock(bgc);
287 279
288 return ret; 280 return ret;
289} 281}
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index a7f5626930f5..75573fa431ba 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -78,10 +78,13 @@ static struct irq_chip crossbar_chip = {
78static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, 78static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
79 irq_hw_number_t hwirq) 79 irq_hw_number_t hwirq)
80{ 80{
81 struct of_phandle_args args; 81 struct irq_fwspec fwspec;
82 int i; 82 int i;
83 int err; 83 int err;
84 84
85 if (!irq_domain_get_of_node(domain->parent))
86 return -EINVAL;
87
85 raw_spin_lock(&cb->lock); 88 raw_spin_lock(&cb->lock);
86 for (i = cb->int_max - 1; i >= 0; i--) { 89 for (i = cb->int_max - 1; i >= 0; i--) {
87 if (cb->irq_map[i] == IRQ_FREE) { 90 if (cb->irq_map[i] == IRQ_FREE) {
@@ -94,13 +97,13 @@ static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
94 if (i < 0) 97 if (i < 0)
95 return -ENODEV; 98 return -ENODEV;
96 99
97 args.np = domain->parent->of_node; 100 fwspec.fwnode = domain->parent->fwnode;
98 args.args_count = 3; 101 fwspec.param_count = 3;
99 args.args[0] = 0; /* SPI */ 102 fwspec.param[0] = 0; /* SPI */
100 args.args[1] = i; 103 fwspec.param[1] = i;
101 args.args[2] = IRQ_TYPE_LEVEL_HIGH; 104 fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
102 105
103 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); 106 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
104 if (err) 107 if (err)
105 cb->irq_map[i] = IRQ_FREE; 108 cb->irq_map[i] = IRQ_FREE;
106 else 109 else
@@ -112,16 +115,16 @@ static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
112static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq, 115static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
113 unsigned int nr_irqs, void *data) 116 unsigned int nr_irqs, void *data)
114{ 117{
115 struct of_phandle_args *args = data; 118 struct irq_fwspec *fwspec = data;
116 irq_hw_number_t hwirq; 119 irq_hw_number_t hwirq;
117 int i; 120 int i;
118 121
119 if (args->args_count != 3) 122 if (fwspec->param_count != 3)
120 return -EINVAL; /* Not GIC compliant */ 123 return -EINVAL; /* Not GIC compliant */
121 if (args->args[0] != 0) 124 if (fwspec->param[0] != 0)
122 return -EINVAL; /* No PPI should point to this domain */ 125 return -EINVAL; /* No PPI should point to this domain */
123 126
124 hwirq = args->args[1]; 127 hwirq = fwspec->param[1];
125 if ((hwirq + nr_irqs) > cb->max_crossbar_sources) 128 if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
126 return -EINVAL; /* Can't deal with this */ 129 return -EINVAL; /* Can't deal with this */
127 130
@@ -166,28 +169,31 @@ static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
166 raw_spin_unlock(&cb->lock); 169 raw_spin_unlock(&cb->lock);
167} 170}
168 171
169static int crossbar_domain_xlate(struct irq_domain *d, 172static int crossbar_domain_translate(struct irq_domain *d,
170 struct device_node *controller, 173 struct irq_fwspec *fwspec,
171 const u32 *intspec, unsigned int intsize, 174 unsigned long *hwirq,
172 unsigned long *out_hwirq, 175 unsigned int *type)
173 unsigned int *out_type)
174{ 176{
175 if (d->of_node != controller) 177 if (is_of_node(fwspec->fwnode)) {
176 return -EINVAL; /* Shouldn't happen, really... */ 178 if (fwspec->param_count != 3)
177 if (intsize != 3) 179 return -EINVAL;
178 return -EINVAL; /* Not GIC compliant */
179 if (intspec[0] != 0)
180 return -EINVAL; /* No PPI should point to this domain */
181 180
182 *out_hwirq = intspec[1]; 181 /* No PPI should point to this domain */
183 *out_type = intspec[2]; 182 if (fwspec->param[0] != 0)
184 return 0; 183 return -EINVAL;
184
185 *hwirq = fwspec->param[1];
186 *type = fwspec->param[2];
187 return 0;
188 }
189
190 return -EINVAL;
185} 191}
186 192
187static const struct irq_domain_ops crossbar_domain_ops = { 193static const struct irq_domain_ops crossbar_domain_ops = {
188 .alloc = crossbar_domain_alloc, 194 .alloc = crossbar_domain_alloc,
189 .free = crossbar_domain_free, 195 .free = crossbar_domain_free,
190 .xlate = crossbar_domain_xlate, 196 .translate = crossbar_domain_translate,
191}; 197};
192 198
193static int __init crossbar_of_init(struct device_node *node) 199static int __init crossbar_of_init(struct device_node *node)
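
The crossbar conversion above moves from of_phandle_args plus an .xlate callback to the firmware-agnostic struct irq_fwspec plus a .translate callback: parent allocations are described by an irq_fwspec, and the translate hook checks is_of_node() on the fwspec's fwnode before interpreting the three GIC cells. A minimal sketch of building a three-cell SPI fwspec for a parent domain (values are placeholders):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical: allocate one SPI from the parent domain of 'domain'. */
static int example_alloc_parent_spi(struct irq_domain *domain,
				    unsigned int virq, unsigned int spi)
{
	struct irq_fwspec fwspec;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = 0;			/* 0 = SPI in the GIC binding */
	fwspec.param[1] = spi;
	fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}
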
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 9448e391cb71..44a077f3a4a2 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -21,6 +21,17 @@
21 21
22#include "irq-gic-common.h" 22#include "irq-gic-common.h"
23 23
24void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
25 void *data)
26{
27 for (; quirks->desc; quirks++) {
28 if (quirks->iidr != (quirks->mask & iidr))
29 continue;
30 quirks->init(data);
31 pr_info("GIC: enabling workaround for %s\n", quirks->desc);
32 }
33}
34
24int gic_configure_irq(unsigned int irq, unsigned int type, 35int gic_configure_irq(unsigned int irq, unsigned int type,
25 void __iomem *base, void (*sync_access)(void)) 36 void __iomem *base, void (*sync_access)(void))
26{ 37{
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 35a9884778bd..fff697db8e22 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -20,10 +20,19 @@
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/irqdomain.h> 21#include <linux/irqdomain.h>
22 22
23struct gic_quirk {
24 const char *desc;
25 void (*init)(void *data);
26 u32 iidr;
27 u32 mask;
28};
29
23int gic_configure_irq(unsigned int irq, unsigned int type, 30int gic_configure_irq(unsigned int irq, unsigned int type,
24 void __iomem *base, void (*sync_access)(void)); 31 void __iomem *base, void (*sync_access)(void));
25void gic_dist_config(void __iomem *base, int gic_irqs, 32void gic_dist_config(void __iomem *base, int gic_irqs,
26 void (*sync_access)(void)); 33 void (*sync_access)(void));
27void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); 34void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
35void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
36 void *data);
28 37
29#endif /* _IRQ_GIC_COMMON_H */ 38#endif /* _IRQ_GIC_COMMON_H */
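
The two gic-common hunks above introduce a small quirk framework: a table of struct gic_quirk entries, terminated by a NULL desc, is scanned by gic_enable_quirks(), and every entry whose iidr equals (mask & hardware IIDR) has its init hook called. A minimal sketch of how a caller might describe and apply one quirk (the IIDR value, mask and names are hypothetical):

#include <linux/kernel.h>

#include "irq-gic-common.h"	/* struct gic_quirk, gic_enable_quirks() */

static void example_quirk_init(void *data)
{
	/* A real init hook would tweak the driver state in 'data';
	 * this sketch only logs that it ran. */
	pr_info("example quirk applied\n");
}

static const struct gic_quirk example_quirks[] = {
	{
		.desc	= "hypothetical erratum 12345",
		.iidr	= 0x0000043b,	/* made-up implementer/revision value */
		.mask	= 0x000fffff,	/* compare only the low IIDR fields */
		.init	= example_quirk_init,
	},
	{ }	/* a NULL desc terminates the table */
};

/* Typical call site: gic_enable_quirks(hw_iidr, example_quirks, priv); */
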
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 12985daa66ab..87f8d104acab 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -37,19 +37,31 @@
37#define V2M_MSI_SETSPI_NS 0x040 37#define V2M_MSI_SETSPI_NS 0x040
38#define V2M_MIN_SPI 32 38#define V2M_MIN_SPI 32
39#define V2M_MAX_SPI 1019 39#define V2M_MAX_SPI 1019
40#define V2M_MSI_IIDR 0xFCC
40 41
41#define V2M_MSI_TYPER_BASE_SPI(x) \ 42#define V2M_MSI_TYPER_BASE_SPI(x) \
42 (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) 43 (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
43 44
44#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) 45#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
45 46
47/* APM X-Gene with GICv2m MSI_IIDR register value */
48#define XGENE_GICV2M_MSI_IIDR 0x06000170
49
50/* List of flags for specific v2m implementation */
51#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
52
53static LIST_HEAD(v2m_nodes);
54static DEFINE_SPINLOCK(v2m_lock);
55
46struct v2m_data { 56struct v2m_data {
47 spinlock_t msi_cnt_lock; 57 struct list_head entry;
58 struct device_node *node;
48 struct resource res; /* GICv2m resource */ 59 struct resource res; /* GICv2m resource */
49 void __iomem *base; /* GICv2m virt address */ 60 void __iomem *base; /* GICv2m virt address */
50 u32 spi_start; /* The SPI number that MSIs start */ 61 u32 spi_start; /* The SPI number that MSIs start */
51 u32 nr_spis; /* The number of SPIs for MSIs */ 62 u32 nr_spis; /* The number of SPIs for MSIs */
52 unsigned long *bm; /* MSI vector bitmap */ 63 unsigned long *bm; /* MSI vector bitmap */
64 u32 flags; /* v2m flags for specific implementation */
53}; 65};
54 66
55static void gicv2m_mask_msi_irq(struct irq_data *d) 67static void gicv2m_mask_msi_irq(struct irq_data *d)
@@ -98,6 +110,9 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
98 msg->address_hi = upper_32_bits(addr); 110 msg->address_hi = upper_32_bits(addr);
99 msg->address_lo = lower_32_bits(addr); 111 msg->address_lo = lower_32_bits(addr);
100 msg->data = data->hwirq; 112 msg->data = data->hwirq;
113
114 if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
115 msg->data -= v2m->spi_start;
101} 116}
102 117
103static struct irq_chip gicv2m_irq_chip = { 118static struct irq_chip gicv2m_irq_chip = {
@@ -113,17 +128,21 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
113 unsigned int virq, 128 unsigned int virq,
114 irq_hw_number_t hwirq) 129 irq_hw_number_t hwirq)
115{ 130{
116 struct of_phandle_args args; 131 struct irq_fwspec fwspec;
117 struct irq_data *d; 132 struct irq_data *d;
118 int err; 133 int err;
119 134
120 args.np = domain->parent->of_node; 135 if (is_of_node(domain->parent->fwnode)) {
121 args.args_count = 3; 136 fwspec.fwnode = domain->parent->fwnode;
122 args.args[0] = 0; 137 fwspec.param_count = 3;
123 args.args[1] = hwirq - 32; 138 fwspec.param[0] = 0;
124 args.args[2] = IRQ_TYPE_EDGE_RISING; 139 fwspec.param[1] = hwirq - 32;
140 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
141 } else {
142 return -EINVAL;
143 }
125 144
126 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); 145 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
127 if (err) 146 if (err)
128 return err; 147 return err;
129 148
@@ -143,27 +162,30 @@ static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
143 return; 162 return;
144 } 163 }
145 164
146 spin_lock(&v2m->msi_cnt_lock); 165 spin_lock(&v2m_lock);
147 __clear_bit(pos, v2m->bm); 166 __clear_bit(pos, v2m->bm);
148 spin_unlock(&v2m->msi_cnt_lock); 167 spin_unlock(&v2m_lock);
149} 168}
150 169
151static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 170static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
152 unsigned int nr_irqs, void *args) 171 unsigned int nr_irqs, void *args)
153{ 172{
154 struct v2m_data *v2m = domain->host_data; 173 struct v2m_data *v2m = NULL, *tmp;
155 int hwirq, offset, err = 0; 174 int hwirq, offset, err = 0;
156 175
157 spin_lock(&v2m->msi_cnt_lock); 176 spin_lock(&v2m_lock);
158 offset = find_first_zero_bit(v2m->bm, v2m->nr_spis); 177 list_for_each_entry(tmp, &v2m_nodes, entry) {
159 if (offset < v2m->nr_spis) 178 offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
160 __set_bit(offset, v2m->bm); 179 if (offset < tmp->nr_spis) {
161 else 180 __set_bit(offset, tmp->bm);
162 err = -ENOSPC; 181 v2m = tmp;
163 spin_unlock(&v2m->msi_cnt_lock); 182 break;
183 }
184 }
185 spin_unlock(&v2m_lock);
164 186
165 if (err) 187 if (!v2m)
166 return err; 188 return -ENOSPC;
167 189
168 hwirq = v2m->spi_start + offset; 190 hwirq = v2m->spi_start + offset;
169 191
@@ -224,12 +246,61 @@ static struct msi_domain_info gicv2m_pmsi_domain_info = {
224 .chip = &gicv2m_pmsi_irq_chip, 246 .chip = &gicv2m_pmsi_irq_chip,
225}; 247};
226 248
249static void gicv2m_teardown(void)
250{
251 struct v2m_data *v2m, *tmp;
252
253 list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
254 list_del(&v2m->entry);
255 kfree(v2m->bm);
256 iounmap(v2m->base);
257 of_node_put(v2m->node);
258 kfree(v2m);
259 }
260}
261
262static int gicv2m_allocate_domains(struct irq_domain *parent)
263{
264 struct irq_domain *inner_domain, *pci_domain, *plat_domain;
265 struct v2m_data *v2m;
266
267 v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
268 if (!v2m)
269 return 0;
270
271 inner_domain = irq_domain_create_tree(of_node_to_fwnode(v2m->node),
272 &gicv2m_domain_ops, v2m);
273 if (!inner_domain) {
274 pr_err("Failed to create GICv2m domain\n");
275 return -ENOMEM;
276 }
277
278 inner_domain->bus_token = DOMAIN_BUS_NEXUS;
279 inner_domain->parent = parent;
280 pci_domain = pci_msi_create_irq_domain(of_node_to_fwnode(v2m->node),
281 &gicv2m_msi_domain_info,
282 inner_domain);
283 plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(v2m->node),
284 &gicv2m_pmsi_domain_info,
285 inner_domain);
286 if (!pci_domain || !plat_domain) {
287 pr_err("Failed to create MSI domains\n");
288 if (plat_domain)
289 irq_domain_remove(plat_domain);
290 if (pci_domain)
291 irq_domain_remove(pci_domain);
292 irq_domain_remove(inner_domain);
293 return -ENOMEM;
294 }
295
296 return 0;
297}
298
227static int __init gicv2m_init_one(struct device_node *node, 299static int __init gicv2m_init_one(struct device_node *node,
228 struct irq_domain *parent) 300 struct irq_domain *parent)
229{ 301{
230 int ret; 302 int ret;
231 struct v2m_data *v2m; 303 struct v2m_data *v2m;
232 struct irq_domain *inner_domain, *pci_domain, *plat_domain;
233 304
234 v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); 305 v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
235 if (!v2m) { 306 if (!v2m) {
@@ -237,6 +308,9 @@ static int __init gicv2m_init_one(struct device_node *node,
237 return -ENOMEM; 308 return -ENOMEM;
238 } 309 }
239 310
311 INIT_LIST_HEAD(&v2m->entry);
312 v2m->node = node;
313
240 ret = of_address_to_resource(node, 0, &v2m->res); 314 ret = of_address_to_resource(node, 0, &v2m->res);
241 if (ret) { 315 if (ret) {
242 pr_err("Failed to allocate v2m resource.\n"); 316 pr_err("Failed to allocate v2m resource.\n");
@@ -266,6 +340,17 @@ static int __init gicv2m_init_one(struct device_node *node,
266 goto err_iounmap; 340 goto err_iounmap;
267 } 341 }
268 342
343 /*
344 * APM X-Gene GICv2m implementation has an erratum where
345 * the MSI data needs to be the offset from the spi_start
346 * in order to trigger the correct MSI interrupt. This is
347 * different from the standard GICv2m implementation where
348 * the MSI data is the absolute value within the range from
349 * spi_start to (spi_start + num_spis).
350 */
351 if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
352 v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
353
269 v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), 354 v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
270 GFP_KERNEL); 355 GFP_KERNEL);
271 if (!v2m->bm) { 356 if (!v2m->bm) {
@@ -273,43 +358,13 @@ static int __init gicv2m_init_one(struct device_node *node,
273 goto err_iounmap; 358 goto err_iounmap;
274 } 359 }
275 360
276 inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m); 361 list_add_tail(&v2m->entry, &v2m_nodes);
277 if (!inner_domain) {
278 pr_err("Failed to create GICv2m domain\n");
279 ret = -ENOMEM;
280 goto err_free_bm;
281 }
282
283 inner_domain->bus_token = DOMAIN_BUS_NEXUS;
284 inner_domain->parent = parent;
285 pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info,
286 inner_domain);
287 plat_domain = platform_msi_create_irq_domain(node,
288 &gicv2m_pmsi_domain_info,
289 inner_domain);
290 if (!pci_domain || !plat_domain) {
291 pr_err("Failed to create MSI domains\n");
292 ret = -ENOMEM;
293 goto err_free_domains;
294 }
295
296 spin_lock_init(&v2m->msi_cnt_lock);
297
298 pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, 362 pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
299 (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, 363 (unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
300 v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); 364 v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
301 365
302 return 0; 366 return 0;
303 367
304err_free_domains:
305 if (plat_domain)
306 irq_domain_remove(plat_domain);
307 if (pci_domain)
308 irq_domain_remove(pci_domain);
309 if (inner_domain)
310 irq_domain_remove(inner_domain);
311err_free_bm:
312 kfree(v2m->bm);
313err_iounmap: 368err_iounmap:
314 iounmap(v2m->base); 369 iounmap(v2m->base);
315err_free_v2m: 370err_free_v2m:
@@ -339,5 +394,9 @@ int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
339 } 394 }
340 } 395 }
341 396
397 if (!ret)
398 ret = gicv2m_allocate_domains(parent);
399 if (ret)
400 gicv2m_teardown();
342 return ret; 401 return ret;
343} 402}
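
The v2m rework above keeps every MSI frame on a global v2m_nodes list under a single v2m_lock, allocates an SPI from the first frame with a free bit, and adds an APM X-Gene quirk: when GICV2M_NEEDS_SPI_OFFSET is set, the MSI payload is the offset from spi_start rather than the absolute SPI number. As a worked example, with spi_start = 64 and hwirq = 70, a standard frame is given 70 as MSI data while the X-Gene frame is given 70 - 64 = 6. A one-function sketch of that payload choice (field names follow the diff):

#include <linux/types.h>

/* MSI data a v2m frame expects for a given SPI number. */
static u32 example_v2m_msi_data(u32 hwirq, u32 spi_start, bool needs_offset)
{
	return needs_offset ? hwirq - spi_start : hwirq;
}
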
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index a7c8c9ffbafd..aee60ed025dc 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -42,7 +42,6 @@ static struct irq_chip its_msi_irq_chip = {
42 42
43struct its_pci_alias { 43struct its_pci_alias {
44 struct pci_dev *pdev; 44 struct pci_dev *pdev;
45 u32 dev_id;
46 u32 count; 45 u32 count;
47}; 46};
48 47
@@ -60,7 +59,6 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
60{ 59{
61 struct its_pci_alias *dev_alias = data; 60 struct its_pci_alias *dev_alias = data;
62 61
63 dev_alias->dev_id = alias;
64 if (pdev != dev_alias->pdev) 62 if (pdev != dev_alias->pdev)
65 dev_alias->count += its_pci_msi_vec_count(pdev); 63 dev_alias->count += its_pci_msi_vec_count(pdev);
66 64
@@ -86,7 +84,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
86 pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); 84 pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
87 85
88 /* ITS specific DeviceID, as the core ITS ignores dev. */ 86 /* ITS specific DeviceID, as the core ITS ignores dev. */
89 info->scratchpad[0].ul = dev_alias.dev_id; 87 info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
90 88
91 return msi_info->ops->msi_prepare(domain->parent, 89 return msi_info->ops->msi_prepare(domain->parent,
92 dev, dev_alias.count, info); 90 dev, dev_alias.count, info);
@@ -125,7 +123,8 @@ static int __init its_pci_msi_init(void)
125 continue; 123 continue;
126 } 124 }
127 125
128 if (!pci_msi_create_irq_domain(np, &its_pci_msi_domain_info, 126 if (!pci_msi_create_irq_domain(of_node_to_fwnode(np),
127 &its_pci_msi_domain_info,
129 parent)) { 128 parent)) {
130 pr_err("%s: unable to create PCI domain\n", 129 pr_err("%s: unable to create PCI domain\n",
131 np->full_name); 130 np->full_name);
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index a86550562779..470b4aa7d62c 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -29,13 +29,25 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
29{ 29{
30 struct msi_domain_info *msi_info; 30 struct msi_domain_info *msi_info;
31 u32 dev_id; 31 u32 dev_id;
32 int ret; 32 int ret, index = 0;
33 33
34 msi_info = msi_get_domain_info(domain->parent); 34 msi_info = msi_get_domain_info(domain->parent);
35 35
36 /* Suck the DeviceID out of the msi-parent property */ 36 /* Suck the DeviceID out of the msi-parent property */
37 ret = of_property_read_u32_index(dev->of_node, "msi-parent", 37 do {
38 1, &dev_id); 38 struct of_phandle_args args;
39
40 ret = of_parse_phandle_with_args(dev->of_node,
41 "msi-parent", "#msi-cells",
42 index, &args);
43 if (args.np == irq_domain_get_of_node(domain)) {
44 if (WARN_ON(args.args_count != 1))
45 return -EINVAL;
46 dev_id = args.args[0];
47 break;
48 }
49 } while (!ret);
50
39 if (ret) 51 if (ret)
40 return ret; 52 return ret;
41 53
@@ -78,7 +90,8 @@ static int __init its_pmsi_init(void)
78 continue; 90 continue;
79 } 91 }
80 92
81 if (!platform_msi_create_irq_domain(np, &its_pmsi_domain_info, 93 if (!platform_msi_create_irq_domain(of_node_to_fwnode(np),
94 &its_pmsi_domain_info,
82 parent)) { 95 parent)) {
83 pr_err("%s: unable to create platform domain\n", 96 pr_err("%s: unable to create platform domain\n",
84 np->full_name); 97 np->full_name);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 25ceae9f7348..e23d1d18f9d6 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -37,7 +37,10 @@
37#include <asm/cputype.h> 37#include <asm/cputype.h>
38#include <asm/exception.h> 38#include <asm/exception.h>
39 39
40#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) 40#include "irq-gic-common.h"
41
42#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
43#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
41 44
42#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 45#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
43 46
@@ -817,7 +820,22 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
817 int i; 820 int i;
818 int psz = SZ_64K; 821 int psz = SZ_64K;
819 u64 shr = GITS_BASER_InnerShareable; 822 u64 shr = GITS_BASER_InnerShareable;
820 u64 cache = GITS_BASER_WaWb; 823 u64 cache;
824 u64 typer;
825 u32 ids;
826
827 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
828 /*
829 * erratum 22375: only alloc 8MB table size
830 * erratum 24313: ignore memory access type
831 */
832 cache = 0;
833 ids = 0x14; /* 20 bits, 8MB */
834 } else {
835 cache = GITS_BASER_WaWb;
836 typer = readq_relaxed(its->base + GITS_TYPER);
837 ids = GITS_TYPER_DEVBITS(typer);
838 }
821 839
822 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 840 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
823 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); 841 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -825,6 +843,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
825 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 843 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
826 int order = get_order(psz); 844 int order = get_order(psz);
827 int alloc_size; 845 int alloc_size;
846 int alloc_pages;
828 u64 tmp; 847 u64 tmp;
829 void *base; 848 void *base;
830 849
@@ -840,9 +859,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
840 * For other tables, only allocate a single page. 859 * For other tables, only allocate a single page.
841 */ 860 */
842 if (type == GITS_BASER_TYPE_DEVICE) { 861 if (type == GITS_BASER_TYPE_DEVICE) {
843 u64 typer = readq_relaxed(its->base + GITS_TYPER);
844 u32 ids = GITS_TYPER_DEVBITS(typer);
845
846 /* 862 /*
847 * 'order' was initialized earlier to the default page 863 * 'order' was initialized earlier to the default page
848 * granule of the ITS. We can't have an allocation 864 * granule of the ITS. We can't have an allocation
@@ -859,6 +875,14 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
859 } 875 }
860 876
861 alloc_size = (1 << order) * PAGE_SIZE; 877 alloc_size = (1 << order) * PAGE_SIZE;
878 alloc_pages = (alloc_size / psz);
879 if (alloc_pages > GITS_BASER_PAGES_MAX) {
880 alloc_pages = GITS_BASER_PAGES_MAX;
881 order = get_order(GITS_BASER_PAGES_MAX * psz);
882 pr_warn("%s: Device Table too large, reducing its page order to %u (%u pages)\n",
883 node_name, order, alloc_pages);
884 }
885
862 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 886 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
863 if (!base) { 887 if (!base) {
864 err = -ENOMEM; 888 err = -ENOMEM;
@@ -887,7 +911,7 @@ retry_baser:
887 break; 911 break;
888 } 912 }
889 913
890 val |= (alloc_size / psz) - 1; 914 val |= alloc_pages - 1;
891 915
892 writeq_relaxed(val, its->base + GITS_BASER + i * 8); 916 writeq_relaxed(val, its->base + GITS_BASER + i * 8);
893 tmp = readq_relaxed(its->base + GITS_BASER + i * 8); 917 tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
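The alloc_pages clamp added above is needed because the GITS_BASER Size field encodes the page count minus one in only a few bits, so a single ITS table cannot span more than GITS_BASER_PAGES_MAX pages of the chosen granule. A rough worked example, assuming GITS_BASER_PAGES_MAX is 256 and the 64 KB granule used as the default above:

/* Sketch of the clamp with assumed constants (256 pages, 64 KB granule). */
static int example_clamp_pages(int alloc_size, int psz)
{
        int pages = alloc_size / psz;   /* e.g. 32 MB / 64 KB = 512 */

        if (pages > 256)                /* GITS_BASER can address at most */
                pages = 256;            /* 256 * 64 KB = 16 MB per table  */
        return pages;                   /* caller recomputes 'order' too  */
}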
@@ -1241,15 +1265,19 @@ static int its_irq_gic_domain_alloc(struct irq_domain *domain,
1241 unsigned int virq, 1265 unsigned int virq,
1242 irq_hw_number_t hwirq) 1266 irq_hw_number_t hwirq)
1243{ 1267{
1244 struct of_phandle_args args; 1268 struct irq_fwspec fwspec;
1245 1269
1246 args.np = domain->parent->of_node; 1270 if (irq_domain_get_of_node(domain->parent)) {
1247 args.args_count = 3; 1271 fwspec.fwnode = domain->parent->fwnode;
1248 args.args[0] = GIC_IRQ_TYPE_LPI; 1272 fwspec.param_count = 3;
1249 args.args[1] = hwirq; 1273 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
1250 args.args[2] = IRQ_TYPE_EDGE_RISING; 1274 fwspec.param[1] = hwirq;
1275 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
1276 } else {
1277 return -EINVAL;
1278 }
1251 1279
1252 return irq_domain_alloc_irqs_parent(domain, virq, 1, &args); 1280 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
1253} 1281}
1254 1282
1255static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1283static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -1370,6 +1398,33 @@ static int its_force_quiescent(void __iomem *base)
1370 } 1398 }
1371} 1399}
1372 1400
1401static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
1402{
1403 struct its_node *its = data;
1404
1405 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
1406}
1407
1408static const struct gic_quirk its_quirks[] = {
1409#ifdef CONFIG_CAVIUM_ERRATUM_22375
1410 {
1411 .desc = "ITS: Cavium errata 22375, 24313",
1412 .iidr = 0xa100034c, /* ThunderX pass 1.x */
1413 .mask = 0xffff0fff,
1414 .init = its_enable_quirk_cavium_22375,
1415 },
1416#endif
1417 {
1418 }
1419};
1420
1421static void its_enable_quirks(struct its_node *its)
1422{
1423 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
1424
1425 gic_enable_quirks(iidr, its_quirks, its);
1426}
1427
1373static int its_probe(struct device_node *node, struct irq_domain *parent) 1428static int its_probe(struct device_node *node, struct irq_domain *parent)
1374{ 1429{
1375 struct resource res; 1430 struct resource res;
@@ -1428,6 +1483,8 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1428 } 1483 }
1429 its->cmd_write = its->cmd_base; 1484 its->cmd_write = its->cmd_base;
1430 1485
1486 its_enable_quirks(its);
1487
1431 err = its_alloc_tables(node->full_name, its); 1488 err = its_alloc_tables(node->full_name, its);
1432 if (err) 1489 if (err)
1433 goto out_free_cmd; 1490 goto out_free_cmd;
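The quirk table added above is keyed on GITS_IIDR with a mask, and gic_enable_quirks() (in irq-gic-common.c) runs the init hook of every entry whose masked IIDR equals the entry's value. A sketch of that comparison using the ThunderX numbers from the table (the helper's exact form is assumed here):

/*
 * With .iidr = 0xa100034c and .mask = 0xffff0fff, any revision of the
 * part matches: e.g. 0xa100134c & 0xffff0fff == 0xa100034c.
 */
static bool example_quirk_matches(u32 iidr, u32 match, u32 mask)
{
        return (iidr & mask) == match;
}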
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 36ecfc870e5a..d7be6ddc34f6 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -108,57 +108,17 @@ static void gic_redist_wait_for_rwp(void)
108 gic_do_wait_for_rwp(gic_data_rdist_rd_base()); 108 gic_do_wait_for_rwp(gic_data_rdist_rd_base());
109} 109}
110 110
111/* Low level accessors */ 111#ifdef CONFIG_ARM64
112static u64 __maybe_unused gic_read_iar(void) 112static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
113{
114 u64 irqstat;
115
116 asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
117 return irqstat;
118}
119
120static void __maybe_unused gic_write_pmr(u64 val)
121{
122 asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
123}
124
125static void __maybe_unused gic_write_ctlr(u64 val)
126{
127 asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
128 isb();
129}
130
131static void __maybe_unused gic_write_grpen1(u64 val)
132{
133 asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
134 isb();
135}
136 113
137static void __maybe_unused gic_write_sgi1r(u64 val) 114static u64 __maybe_unused gic_read_iar(void)
138{
139 asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
140}
141
142static void gic_enable_sre(void)
143{ 115{
144 u64 val; 116 if (static_branch_unlikely(&is_cavium_thunderx))
145 117 return gic_read_iar_cavium_thunderx();
146 asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); 118 else
147 val |= ICC_SRE_EL1_SRE; 119 return gic_read_iar_common();
148 asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
149 isb();
150
151 /*
152 * Need to check that the SRE bit has actually been set. If
153 * not, it means that SRE is disabled at EL2. We're going to
154 * die painfully, and there is nothing we can do about it.
155 *
156 * Kindly inform the luser.
157 */
158 asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
159 if (!(val & ICC_SRE_EL1_SRE))
160 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
161} 120}
121#endif
162 122
163static void gic_enable_redist(bool enable) 123static void gic_enable_redist(bool enable)
164{ 124{
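The ThunderX IAR read above is gated by a static key, so the common case pays only a patched-out branch rather than a runtime test on every interrupt. A minimal sketch of the static-branch pattern with hypothetical names:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_quirk_key);

static void example_enable_quirk(void)
{
        static_branch_enable(&example_quirk_key);       /* patch the branch once, at boot */
}

static unsigned int example_read(void)
{
        if (static_branch_unlikely(&example_quirk_key))
                return 1;       /* errata path, normally skipped without a test */
        return 0;               /* common fast path */
}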
@@ -359,11 +319,11 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
359 return 0; 319 return 0;
360} 320}
361 321
362static u64 gic_mpidr_to_affinity(u64 mpidr) 322static u64 gic_mpidr_to_affinity(unsigned long mpidr)
363{ 323{
364 u64 aff; 324 u64 aff;
365 325
366 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | 326 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
367 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 327 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
368 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 328 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
369 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 329 MPIDR_AFFINITY_LEVEL(mpidr, 0));
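The new (u64) cast matters because mpidr is now an unsigned long, which is 32 bits wide on arm32: shifting a 32-bit value by 32 is undefined behaviour, and in practice the Aff3 field would simply be lost. A sketch of the difference:

/* Sketch: widen before shifting so Aff3 survives on 32-bit builds. */
static u64 example_aff3_bits(u32 aff3)
{
        /* (aff3 << 32) would shift a 32-bit value by its full width: undefined. */
        return (u64)aff3 << 32;         /* e.g. 0x01 -> 0x0000000100000000 */
}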
@@ -373,7 +333,7 @@ static u64 gic_mpidr_to_affinity(u64 mpidr)
373 333
374static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) 334static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
375{ 335{
376 u64 irqnr; 336 u32 irqnr;
377 337
378 do { 338 do {
379 irqnr = gic_read_iar(); 339 irqnr = gic_read_iar();
@@ -432,12 +392,12 @@ static void __init gic_dist_init(void)
432 */ 392 */
433 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); 393 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
434 for (i = 32; i < gic_data.irq_nr; i++) 394 for (i = 32; i < gic_data.irq_nr; i++)
435 writeq_relaxed(affinity, base + GICD_IROUTER + i * 8); 395 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
436} 396}
437 397
438static int gic_populate_rdist(void) 398static int gic_populate_rdist(void)
439{ 399{
440 u64 mpidr = cpu_logical_map(smp_processor_id()); 400 unsigned long mpidr = cpu_logical_map(smp_processor_id());
441 u64 typer; 401 u64 typer;
442 u32 aff; 402 u32 aff;
443 int i; 403 int i;
@@ -463,15 +423,14 @@ static int gic_populate_rdist(void)
463 } 423 }
464 424
465 do { 425 do {
466 typer = readq_relaxed(ptr + GICR_TYPER); 426 typer = gic_read_typer(ptr + GICR_TYPER);
467 if ((typer >> 32) == aff) { 427 if ((typer >> 32) == aff) {
468 u64 offset = ptr - gic_data.redist_regions[i].redist_base; 428 u64 offset = ptr - gic_data.redist_regions[i].redist_base;
469 gic_data_rdist_rd_base() = ptr; 429 gic_data_rdist_rd_base() = ptr;
470 gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; 430 gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
471 pr_info("CPU%d: found redistributor %llx region %d:%pa\n", 431 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
472 smp_processor_id(), 432 smp_processor_id(), mpidr, i,
473 (unsigned long long)mpidr, 433 &gic_data_rdist()->phys_base);
474 i, &gic_data_rdist()->phys_base);
475 return 0; 434 return 0;
476 } 435 }
477 436
@@ -486,15 +445,22 @@ static int gic_populate_rdist(void)
486 } 445 }
487 446
488 /* We couldn't even deal with ourselves... */ 447 /* We couldn't even deal with ourselves... */
489 WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n", 448 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
490 smp_processor_id(), (unsigned long long)mpidr); 449 smp_processor_id(), mpidr);
491 return -ENODEV; 450 return -ENODEV;
492} 451}
493 452
494static void gic_cpu_sys_reg_init(void) 453static void gic_cpu_sys_reg_init(void)
495{ 454{
496 /* Enable system registers */ 455 /*
497 gic_enable_sre(); 456 * Need to check that the SRE bit has actually been set. If
457 * not, it means that SRE is disabled at EL2. We're going to
458 * die painfully, and there is nothing we can do about it.
459 *
460 * Kindly inform the luser.
461 */
462 if (!gic_enable_sre())
463 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
498 464
499 /* Set priority mask register */ 465 /* Set priority mask register */
500 gic_write_pmr(DEFAULT_PMR_VALUE); 466 gic_write_pmr(DEFAULT_PMR_VALUE);
@@ -557,10 +523,10 @@ static struct notifier_block gic_cpu_notifier = {
557}; 523};
558 524
559static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, 525static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
560 u64 cluster_id) 526 unsigned long cluster_id)
561{ 527{
562 int cpu = *base_cpu; 528 int cpu = *base_cpu;
563 u64 mpidr = cpu_logical_map(cpu); 529 unsigned long mpidr = cpu_logical_map(cpu);
564 u16 tlist = 0; 530 u16 tlist = 0;
565 531
566 while (cpu < nr_cpu_ids) { 532 while (cpu < nr_cpu_ids) {
@@ -621,7 +587,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
621 smp_wmb(); 587 smp_wmb();
622 588
623 for_each_cpu(cpu, mask) { 589 for_each_cpu(cpu, mask) {
624 u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; 590 unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
625 u16 tlist; 591 u16 tlist;
626 592
627 tlist = gic_compute_target_list(&cpu, mask, cluster_id); 593 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -657,7 +623,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
657 reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); 623 reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
658 val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); 624 val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
659 625
660 writeq_relaxed(val, reg); 626 gic_write_irouter(val, reg);
661 627
662 /* 628 /*
663 * If the interrupt was enabled, enable it again. Otherwise, 629 * If the interrupt was enabled, enable it again. Otherwise,
@@ -771,32 +737,34 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
771 return 0; 737 return 0;
772} 738}
773 739
774static int gic_irq_domain_xlate(struct irq_domain *d, 740static int gic_irq_domain_translate(struct irq_domain *d,
775 struct device_node *controller, 741 struct irq_fwspec *fwspec,
776 const u32 *intspec, unsigned int intsize, 742 unsigned long *hwirq,
777 unsigned long *out_hwirq, unsigned int *out_type) 743 unsigned int *type)
778{ 744{
779 if (d->of_node != controller) 745 if (is_of_node(fwspec->fwnode)) {
780 return -EINVAL; 746 if (fwspec->param_count < 3)
781 if (intsize < 3) 747 return -EINVAL;
782 return -EINVAL;
783 748
784 switch(intspec[0]) { 749 switch (fwspec->param[0]) {
785 case 0: /* SPI */ 750 case 0: /* SPI */
786 *out_hwirq = intspec[1] + 32; 751 *hwirq = fwspec->param[1] + 32;
787 break; 752 break;
788 case 1: /* PPI */ 753 case 1: /* PPI */
789 *out_hwirq = intspec[1] + 16; 754 *hwirq = fwspec->param[1] + 16;
790 break; 755 break;
791 case GIC_IRQ_TYPE_LPI: /* LPI */ 756 case GIC_IRQ_TYPE_LPI: /* LPI */
792 *out_hwirq = intspec[1]; 757 *hwirq = fwspec->param[1];
793 break; 758 break;
794 default: 759 default:
795 return -EINVAL; 760 return -EINVAL;
761 }
762
763 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
764 return 0;
796 } 765 }
797 766
798 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; 767 return -EINVAL;
799 return 0;
800} 768}
801 769
802static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 770static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -805,10 +773,9 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
805 int i, ret; 773 int i, ret;
806 irq_hw_number_t hwirq; 774 irq_hw_number_t hwirq;
807 unsigned int type = IRQ_TYPE_NONE; 775 unsigned int type = IRQ_TYPE_NONE;
808 struct of_phandle_args *irq_data = arg; 776 struct irq_fwspec *fwspec = arg;
809 777
810 ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, 778 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
811 irq_data->args_count, &hwirq, &type);
812 if (ret) 779 if (ret)
813 return ret; 780 return ret;
814 781
@@ -831,11 +798,19 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
831} 798}
832 799
833static const struct irq_domain_ops gic_irq_domain_ops = { 800static const struct irq_domain_ops gic_irq_domain_ops = {
834 .xlate = gic_irq_domain_xlate, 801 .translate = gic_irq_domain_translate,
835 .alloc = gic_irq_domain_alloc, 802 .alloc = gic_irq_domain_alloc,
836 .free = gic_irq_domain_free, 803 .free = gic_irq_domain_free,
837}; 804};
838 805
806static void gicv3_enable_quirks(void)
807{
808#ifdef CONFIG_ARM64
809 if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
810 static_branch_enable(&is_cavium_thunderx);
811#endif
812}
813
839static int __init gic_of_init(struct device_node *node, struct device_node *parent) 814static int __init gic_of_init(struct device_node *node, struct device_node *parent)
840{ 815{
841 void __iomem *dist_base; 816 void __iomem *dist_base;
@@ -901,6 +876,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
901 gic_data.nr_redist_regions = nr_redist_regions; 876 gic_data.nr_redist_regions = nr_redist_regions;
902 gic_data.redist_stride = redist_stride; 877 gic_data.redist_stride = redist_stride;
903 878
879 gicv3_enable_quirks();
880
904 /* 881 /*
905 * Find out how many interrupts are supported. 882 * Find out how many interrupts are supported.
906 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) 883 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 982c09c2d791..1d0e76855106 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -51,6 +51,19 @@
51 51
52#include "irq-gic-common.h" 52#include "irq-gic-common.h"
53 53
54#ifdef CONFIG_ARM64
55#include <asm/cpufeature.h>
56
57static void gic_check_cpu_features(void)
58{
59 WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
60 TAINT_CPU_OUT_OF_SPEC,
61 "GICv3 system registers enabled, broken firmware!\n");
62}
63#else
 64#define gic_check_cpu_features() do { } while (0)
65#endif
66
54union gic_base { 67union gic_base {
55 void __iomem *common_base; 68 void __iomem *common_base;
56 void __percpu * __iomem *percpu_base; 69 void __percpu * __iomem *percpu_base;
@@ -903,28 +916,39 @@ static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
903{ 916{
904} 917}
905 918
906static int gic_irq_domain_xlate(struct irq_domain *d, 919static int gic_irq_domain_translate(struct irq_domain *d,
907 struct device_node *controller, 920 struct irq_fwspec *fwspec,
908 const u32 *intspec, unsigned int intsize, 921 unsigned long *hwirq,
909 unsigned long *out_hwirq, unsigned int *out_type) 922 unsigned int *type)
910{ 923{
911 unsigned long ret = 0; 924 if (is_of_node(fwspec->fwnode)) {
925 if (fwspec->param_count < 3)
926 return -EINVAL;
912 927
913 if (d->of_node != controller) 928 /* Get the interrupt number and add 16 to skip over SGIs */
914 return -EINVAL; 929 *hwirq = fwspec->param[1] + 16;
915 if (intsize < 3)
916 return -EINVAL;
917 930
918 /* Get the interrupt number and add 16 to skip over SGIs */ 931 /*
919 *out_hwirq = intspec[1] + 16; 932 * For SPIs, we need to add 16 more to get the GIC irq
933 * ID number
934 */
935 if (!fwspec->param[0])
936 *hwirq += 16;
920 937
921 /* For SPIs, we need to add 16 more to get the GIC irq ID number */ 938 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
922 if (!intspec[0]) 939 return 0;
923 *out_hwirq += 16; 940 }
924 941
925 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; 942 if (fwspec->fwnode->type == FWNODE_IRQCHIP) {
 943 if (fwspec->param_count != 2)
944 return -EINVAL;
926 945
927 return ret; 946 *hwirq = fwspec->param[0];
947 *type = fwspec->param[1];
948 return 0;
949 }
950
951 return -EINVAL;
928} 952}
929 953
930#ifdef CONFIG_SMP 954#ifdef CONFIG_SMP
@@ -952,10 +976,9 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
952 int i, ret; 976 int i, ret;
953 irq_hw_number_t hwirq; 977 irq_hw_number_t hwirq;
954 unsigned int type = IRQ_TYPE_NONE; 978 unsigned int type = IRQ_TYPE_NONE;
955 struct of_phandle_args *irq_data = arg; 979 struct irq_fwspec *fwspec = arg;
956 980
957 ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, 981 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
958 irq_data->args_count, &hwirq, &type);
959 if (ret) 982 if (ret)
960 return ret; 983 return ret;
961 984
@@ -966,7 +989,7 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
966} 989}
967 990
968static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { 991static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
969 .xlate = gic_irq_domain_xlate, 992 .translate = gic_irq_domain_translate,
970 .alloc = gic_irq_domain_alloc, 993 .alloc = gic_irq_domain_alloc,
971 .free = irq_domain_free_irqs_top, 994 .free = irq_domain_free_irqs_top,
972}; 995};
@@ -974,12 +997,11 @@ static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
974static const struct irq_domain_ops gic_irq_domain_ops = { 997static const struct irq_domain_ops gic_irq_domain_ops = {
975 .map = gic_irq_domain_map, 998 .map = gic_irq_domain_map,
976 .unmap = gic_irq_domain_unmap, 999 .unmap = gic_irq_domain_unmap,
977 .xlate = gic_irq_domain_xlate,
978}; 1000};
979 1001
980static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, 1002static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
981 void __iomem *dist_base, void __iomem *cpu_base, 1003 void __iomem *dist_base, void __iomem *cpu_base,
982 u32 percpu_offset, struct device_node *node) 1004 u32 percpu_offset, struct fwnode_handle *handle)
983{ 1005{
984 irq_hw_number_t hwirq_base; 1006 irq_hw_number_t hwirq_base;
985 struct gic_chip_data *gic; 1007 struct gic_chip_data *gic;
@@ -987,6 +1009,8 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
987 1009
988 BUG_ON(gic_nr >= MAX_GIC_NR); 1010 BUG_ON(gic_nr >= MAX_GIC_NR);
989 1011
1012 gic_check_cpu_features();
1013
990 gic = &gic_data[gic_nr]; 1014 gic = &gic_data[gic_nr];
991#ifdef CONFIG_GIC_NON_BANKED 1015#ifdef CONFIG_GIC_NON_BANKED
 992 if (percpu_offset) { /* Franken-GIC without banked registers... */ 1016 if (percpu_offset) { /* Franken-GIC without banked registers... */
@@ -1031,11 +1055,11 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1031 gic_irqs = 1020; 1055 gic_irqs = 1020;
1032 gic->gic_irqs = gic_irqs; 1056 gic->gic_irqs = gic_irqs;
1033 1057
1034 if (node) { /* DT case */ 1058 if (handle) { /* DT/ACPI */
1035 gic->domain = irq_domain_add_linear(node, gic_irqs, 1059 gic->domain = irq_domain_create_linear(handle, gic_irqs,
1036 &gic_irq_domain_hierarchy_ops, 1060 &gic_irq_domain_hierarchy_ops,
1037 gic); 1061 gic);
1038 } else { /* Non-DT case */ 1062 } else { /* Legacy support */
1039 /* 1063 /*
1040 * For primary GICs, skip over SGIs. 1064 * For primary GICs, skip over SGIs.
1041 * For secondary GICs, skip over PPIs, too. 1065 * For secondary GICs, skip over PPIs, too.
@@ -1058,7 +1082,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1058 irq_base = irq_start; 1082 irq_base = irq_start;
1059 } 1083 }
1060 1084
1061 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, 1085 gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
1062 hwirq_base, &gic_irq_domain_ops, gic); 1086 hwirq_base, &gic_irq_domain_ops, gic);
1063 } 1087 }
1064 1088
@@ -1087,17 +1111,15 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1087 gic_pm_init(gic); 1111 gic_pm_init(gic);
1088} 1112}
1089 1113
1090void __init gic_init_bases(unsigned int gic_nr, int irq_start, 1114void __init gic_init(unsigned int gic_nr, int irq_start,
1091 void __iomem *dist_base, void __iomem *cpu_base, 1115 void __iomem *dist_base, void __iomem *cpu_base)
1092 u32 percpu_offset, struct device_node *node)
1093{ 1116{
1094 /* 1117 /*
1095 * Non-DT/ACPI systems won't run a hypervisor, so let's not 1118 * Non-DT/ACPI systems won't run a hypervisor, so let's not
1096 * bother with these... 1119 * bother with these...
1097 */ 1120 */
1098 static_key_slow_dec(&supports_deactivate); 1121 static_key_slow_dec(&supports_deactivate);
1099 __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 1122 __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
1100 percpu_offset, node);
1101} 1123}
1102 1124
1103#ifdef CONFIG_OF 1125#ifdef CONFIG_OF
@@ -1168,7 +1190,8 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1168 if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) 1190 if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
1169 percpu_offset = 0; 1191 percpu_offset = 0;
1170 1192
1171 __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); 1193 __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
1194 &node->fwnode);
1172 if (!gic_cnt) 1195 if (!gic_cnt)
1173 gic_init_physaddr(node); 1196 gic_init_physaddr(node);
1174 1197
@@ -1191,6 +1214,7 @@ IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1191IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); 1214IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
1192IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1215IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
1193IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1216IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
1217IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
1194 1218
1195#endif 1219#endif
1196 1220
@@ -1242,6 +1266,7 @@ int __init
1242gic_v2_acpi_init(struct acpi_table_header *table) 1266gic_v2_acpi_init(struct acpi_table_header *table)
1243{ 1267{
1244 void __iomem *cpu_base, *dist_base; 1268 void __iomem *cpu_base, *dist_base;
1269 struct fwnode_handle *domain_handle;
1245 int count; 1270 int count;
1246 1271
1247 /* Collect CPU base addresses */ 1272 /* Collect CPU base addresses */
@@ -1292,14 +1317,19 @@ gic_v2_acpi_init(struct acpi_table_header *table)
1292 static_key_slow_dec(&supports_deactivate); 1317 static_key_slow_dec(&supports_deactivate);
1293 1318
1294 /* 1319 /*
1295 * Initialize zero GIC instance (no multi-GIC support). Also, set GIC 1320 * Initialize GIC instance zero (no multi-GIC support).
1296 * as default IRQ domain to allow for GSI registration and GSI to IRQ
1297 * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
1298 */ 1321 */
1299 __gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL); 1322 domain_handle = irq_domain_alloc_fwnode(dist_base);
1300 irq_set_default_host(gic_data[0].domain); 1323 if (!domain_handle) {
1324 pr_err("Unable to allocate domain handle\n");
1325 iounmap(cpu_base);
1326 iounmap(dist_base);
1327 return -ENOMEM;
1328 }
1329
1330 __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
1301 1331
1302 acpi_irq_model = ACPI_IRQ_MODEL_GIC; 1332 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
1303 return 0; 1333 return 0;
1304} 1334}
1305#endif 1335#endif
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 8f3ca8f3a62b..9688d2e2a636 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -325,7 +325,7 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
325{ 325{
326 unsigned long ret = 0; 326 unsigned long ret = 0;
327 327
328 if (d->of_node != controller) 328 if (irq_domain_get_of_node(d) != controller)
329 return -EINVAL; 329 return -EINVAL;
330 if (intsize < 3) 330 if (intsize < 3)
331 return -EINVAL; 331 return -EINVAL;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index e484fd255321..6b304eb39bd2 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -377,8 +377,8 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent)
377 } 377 }
378 378
379 domain = __init_i8259_irqs(node); 379 domain = __init_i8259_irqs(node);
380 irq_set_handler_data(parent_irq, domain); 380 irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
381 irq_set_chained_handler(parent_irq, i8259_irq_dispatch); 381 domain);
382 return 0; 382 return 0;
383} 383}
384IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init); 384IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index e48d3305456f..15af9a9753e5 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -150,49 +150,42 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
150#endif 150#endif
151}; 151};
152 152
153static int imx_gpcv2_domain_xlate(struct irq_domain *domain, 153static int imx_gpcv2_domain_translate(struct irq_domain *d,
154 struct device_node *controller, 154 struct irq_fwspec *fwspec,
155 const u32 *intspec, 155 unsigned long *hwirq,
156 unsigned int intsize, 156 unsigned int *type)
157 unsigned long *out_hwirq,
158 unsigned int *out_type)
159{ 157{
160 /* Shouldn't happen, really... */ 158 if (is_of_node(fwspec->fwnode)) {
161 if (domain->of_node != controller) 159 if (fwspec->param_count != 3)
162 return -EINVAL; 160 return -EINVAL;
163 161
164 /* Not GIC compliant */ 162 /* No PPI should point to this domain */
165 if (intsize != 3) 163 if (fwspec->param[0] != 0)
166 return -EINVAL; 164 return -EINVAL;
167 165
168 /* No PPI should point to this domain */ 166 *hwirq = fwspec->param[1];
169 if (intspec[0] != 0) 167 *type = fwspec->param[2];
170 return -EINVAL; 168 return 0;
169 }
171 170
172 *out_hwirq = intspec[1]; 171 return -EINVAL;
173 *out_type = intspec[2];
174 return 0;
175} 172}
176 173
177static int imx_gpcv2_domain_alloc(struct irq_domain *domain, 174static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
178 unsigned int irq, unsigned int nr_irqs, 175 unsigned int irq, unsigned int nr_irqs,
179 void *data) 176 void *data)
180{ 177{
181 struct of_phandle_args *args = data; 178 struct irq_fwspec *fwspec = data;
182 struct of_phandle_args parent_args; 179 struct irq_fwspec parent_fwspec;
183 irq_hw_number_t hwirq; 180 irq_hw_number_t hwirq;
181 unsigned int type;
182 int err;
184 int i; 183 int i;
185 184
186 /* Not GIC compliant */ 185 err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
187 if (args->args_count != 3) 186 if (err)
188 return -EINVAL; 187 return err;
189
190 /* No PPI should point to this domain */
191 if (args->args[0] != 0)
192 return -EINVAL;
193 188
194 /* Can't deal with this */
195 hwirq = args->args[1];
196 if (hwirq >= GPC_MAX_IRQS) 189 if (hwirq >= GPC_MAX_IRQS)
197 return -EINVAL; 190 return -EINVAL;
198 191
@@ -201,15 +194,16 @@ static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
201 &gpcv2_irqchip_data_chip, domain->host_data); 194 &gpcv2_irqchip_data_chip, domain->host_data);
202 } 195 }
203 196
204 parent_args = *args; 197 parent_fwspec = *fwspec;
205 parent_args.np = domain->parent->of_node; 198 parent_fwspec.fwnode = domain->parent->fwnode;
206 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args); 199 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
200 &parent_fwspec);
207} 201}
208 202
209static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = { 203static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
210 .xlate = imx_gpcv2_domain_xlate, 204 .translate = imx_gpcv2_domain_translate,
211 .alloc = imx_gpcv2_domain_alloc, 205 .alloc = imx_gpcv2_domain_alloc,
212 .free = irq_domain_free_irqs_common, 206 .free = irq_domain_free_irqs_common,
213}; 207};
214 208
215static int __init imx_gpcv2_irqchip_init(struct device_node *node, 209static int __init imx_gpcv2_irqchip_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index c8753da4c156..63ac73b1d9c8 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -67,22 +67,25 @@ static struct irq_chip mtk_sysirq_chip = {
67 .irq_set_affinity = irq_chip_set_affinity_parent, 67 .irq_set_affinity = irq_chip_set_affinity_parent,
68}; 68};
69 69
70static int mtk_sysirq_domain_xlate(struct irq_domain *d, 70static int mtk_sysirq_domain_translate(struct irq_domain *d,
71 struct device_node *controller, 71 struct irq_fwspec *fwspec,
72 const u32 *intspec, unsigned int intsize, 72 unsigned long *hwirq,
73 unsigned long *out_hwirq, 73 unsigned int *type)
74 unsigned int *out_type)
75{ 74{
76 if (intsize != 3) 75 if (is_of_node(fwspec->fwnode)) {
77 return -EINVAL; 76 if (fwspec->param_count != 3)
77 return -EINVAL;
78 78
79 /* sysirq doesn't support PPI */ 79 /* No PPI should point to this domain */
80 if (intspec[0]) 80 if (fwspec->param[0] != 0)
81 return -EINVAL; 81 return -EINVAL;
82 82
83 *out_hwirq = intspec[1]; 83 *hwirq = fwspec->param[1];
84 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; 84 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
85 return 0; 85 return 0;
86 }
87
88 return -EINVAL;
86} 89}
87 90
88static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, 91static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -90,30 +93,30 @@ static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
90{ 93{
91 int i; 94 int i;
92 irq_hw_number_t hwirq; 95 irq_hw_number_t hwirq;
93 struct of_phandle_args *irq_data = arg; 96 struct irq_fwspec *fwspec = arg;
94 struct of_phandle_args gic_data = *irq_data; 97 struct irq_fwspec gic_fwspec = *fwspec;
95 98
96 if (irq_data->args_count != 3) 99 if (fwspec->param_count != 3)
97 return -EINVAL; 100 return -EINVAL;
98 101
99 /* sysirq doesn't support PPI */ 102 /* sysirq doesn't support PPI */
100 if (irq_data->args[0]) 103 if (fwspec->param[0])
101 return -EINVAL; 104 return -EINVAL;
102 105
103 hwirq = irq_data->args[1]; 106 hwirq = fwspec->param[1];
104 for (i = 0; i < nr_irqs; i++) 107 for (i = 0; i < nr_irqs; i++)
105 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 108 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
106 &mtk_sysirq_chip, 109 &mtk_sysirq_chip,
107 domain->host_data); 110 domain->host_data);
108 111
109 gic_data.np = domain->parent->of_node; 112 gic_fwspec.fwnode = domain->parent->fwnode;
110 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); 113 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec);
111} 114}
112 115
113static const struct irq_domain_ops sysirq_domain_ops = { 116static const struct irq_domain_ops sysirq_domain_ops = {
114 .xlate = mtk_sysirq_domain_xlate, 117 .translate = mtk_sysirq_domain_translate,
115 .alloc = mtk_sysirq_domain_alloc, 118 .alloc = mtk_sysirq_domain_alloc,
116 .free = irq_domain_free_irqs_common, 119 .free = irq_domain_free_irqs_common,
117}; 120};
118 121
119static int __init mtk_sysirq_of_init(struct device_node *node, 122static int __init mtk_sysirq_of_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 604df63e2edf..c22e2d40cb30 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. 2 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de>
4 * Add Alphascale ASM9260 support.
3 * 5 *
4 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -28,20 +30,64 @@
28#include <linux/stmp_device.h> 30#include <linux/stmp_device.h>
29#include <asm/exception.h> 31#include <asm/exception.h>
30 32
33#include "alphascale_asm9260-icoll.h"
34
35/*
 36 * this device provides 4 offsets for each register:
37 * 0x0 - plain read write mode
38 * 0x4 - set mode, OR logic.
39 * 0x8 - clr mode, XOR logic.
 40 * 0xc - toggle mode.
41 */
42#define SET_REG 4
43#define CLR_REG 8
44
31#define HW_ICOLL_VECTOR 0x0000 45#define HW_ICOLL_VECTOR 0x0000
32#define HW_ICOLL_LEVELACK 0x0010 46#define HW_ICOLL_LEVELACK 0x0010
33#define HW_ICOLL_CTRL 0x0020 47#define HW_ICOLL_CTRL 0x0020
34#define HW_ICOLL_STAT_OFFSET 0x0070 48#define HW_ICOLL_STAT_OFFSET 0x0070
35#define HW_ICOLL_INTERRUPTn_SET(n) (0x0124 + (n) * 0x10) 49#define HW_ICOLL_INTERRUPT0 0x0120
36#define HW_ICOLL_INTERRUPTn_CLR(n) (0x0128 + (n) * 0x10) 50#define HW_ICOLL_INTERRUPTn(n) ((n) * 0x10)
37#define BM_ICOLL_INTERRUPTn_ENABLE 0x00000004 51#define BM_ICOLL_INTR_ENABLE BIT(2)
38#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1 52#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1
39 53
40#define ICOLL_NUM_IRQS 128 54#define ICOLL_NUM_IRQS 128
41 55
42static void __iomem *icoll_base; 56enum icoll_type {
57 ICOLL,
58 ASM9260_ICOLL,
59};
60
61struct icoll_priv {
62 void __iomem *vector;
63 void __iomem *levelack;
64 void __iomem *ctrl;
65 void __iomem *stat;
66 void __iomem *intr;
67 void __iomem *clear;
68 enum icoll_type type;
69};
70
71static struct icoll_priv icoll_priv;
43static struct irq_domain *icoll_domain; 72static struct irq_domain *icoll_domain;
44 73
 74/* calculate bit offset depending on the number of interrupts per register */
75static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
76{
77 /*
 78 * mask the lower part of hwirq to convert it
 79 * to 0, 1, 2 or 3, then multiply it by 8 (i.e. shift by 3)
80 */
81 return bit << ((d->hwirq & 3) << 3);
82}
83
 84/* calculate mem offset depending on the number of interrupts per register */
85static void __iomem *icoll_intr_reg(struct irq_data *d)
86{
87 /* offset = hwirq / intr_per_reg * 0x10 */
88 return icoll_priv.intr + ((d->hwirq >> 2) * 0x10);
89}
90
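/*
 * Worked example for the two helpers above, assuming hwirq = 13:
 *   register index = 13 >> 2 = 3      -> offset 3 * 0x10 = 0x30
 *   byte slot      = 13 & 3  = 1      -> bit shift 1 << 3 = 8
 *   enable mask    = BM_ICOLL_INTR_ENABLE << 8 = BIT(2) << 8 = 0x400
 * so unmasking IRQ 13 writes 0x400 to icoll_priv.intr + 0x30 + SET_REG.
 */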
45static void icoll_ack_irq(struct irq_data *d) 91static void icoll_ack_irq(struct irq_data *d)
46{ 92{
47 /* 93 /*
@@ -50,19 +96,35 @@ static void icoll_ack_irq(struct irq_data *d)
50 * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally. 96 * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally.
51 */ 97 */
52 __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0, 98 __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0,
53 icoll_base + HW_ICOLL_LEVELACK); 99 icoll_priv.levelack);
54} 100}
55 101
56static void icoll_mask_irq(struct irq_data *d) 102static void icoll_mask_irq(struct irq_data *d)
57{ 103{
58 __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, 104 __raw_writel(BM_ICOLL_INTR_ENABLE,
59 icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq)); 105 icoll_priv.intr + CLR_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
60} 106}
61 107
62static void icoll_unmask_irq(struct irq_data *d) 108static void icoll_unmask_irq(struct irq_data *d)
63{ 109{
64 __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, 110 __raw_writel(BM_ICOLL_INTR_ENABLE,
65 icoll_base + HW_ICOLL_INTERRUPTn_SET(d->hwirq)); 111 icoll_priv.intr + SET_REG + HW_ICOLL_INTERRUPTn(d->hwirq));
112}
113
114static void asm9260_mask_irq(struct irq_data *d)
115{
116 __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
117 icoll_intr_reg(d) + CLR_REG);
118}
119
120static void asm9260_unmask_irq(struct irq_data *d)
121{
122 __raw_writel(ASM9260_BM_CLEAR_BIT(d->hwirq),
123 icoll_priv.clear +
124 ASM9260_HW_ICOLL_CLEARn(d->hwirq));
125
126 __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE),
127 icoll_intr_reg(d) + SET_REG);
66} 128}
67 129
68static struct irq_chip mxs_icoll_chip = { 130static struct irq_chip mxs_icoll_chip = {
@@ -71,19 +133,32 @@ static struct irq_chip mxs_icoll_chip = {
71 .irq_unmask = icoll_unmask_irq, 133 .irq_unmask = icoll_unmask_irq,
72}; 134};
73 135
136static struct irq_chip asm9260_icoll_chip = {
137 .irq_ack = icoll_ack_irq,
138 .irq_mask = asm9260_mask_irq,
139 .irq_unmask = asm9260_unmask_irq,
140};
141
74asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) 142asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
75{ 143{
76 u32 irqnr; 144 u32 irqnr;
77 145
78 irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); 146 irqnr = __raw_readl(icoll_priv.stat);
79 __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); 147 __raw_writel(irqnr, icoll_priv.vector);
80 handle_domain_irq(icoll_domain, irqnr, regs); 148 handle_domain_irq(icoll_domain, irqnr, regs);
81} 149}
82 150
83static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, 151static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
84 irq_hw_number_t hw) 152 irq_hw_number_t hw)
85{ 153{
86 irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); 154 struct irq_chip *chip;
155
156 if (icoll_priv.type == ICOLL)
157 chip = &mxs_icoll_chip;
158 else
159 chip = &asm9260_icoll_chip;
160
161 irq_set_chip_and_handler(virq, chip, handle_level_irq);
87 162
88 return 0; 163 return 0;
89} 164}
@@ -93,20 +168,80 @@ static const struct irq_domain_ops icoll_irq_domain_ops = {
93 .xlate = irq_domain_xlate_onecell, 168 .xlate = irq_domain_xlate_onecell,
94}; 169};
95 170
171static void __init icoll_add_domain(struct device_node *np,
172 int num)
173{
174 icoll_domain = irq_domain_add_linear(np, num,
175 &icoll_irq_domain_ops, NULL);
176
177 if (!icoll_domain)
178 panic("%s: unable to create irq domain", np->full_name);
179}
180
181static void __iomem * __init icoll_init_iobase(struct device_node *np)
182{
183 void __iomem *icoll_base;
184
185 icoll_base = of_io_request_and_map(np, 0, np->name);
186 if (!icoll_base)
187 panic("%s: unable to map resource", np->full_name);
188 return icoll_base;
189}
190
96static int __init icoll_of_init(struct device_node *np, 191static int __init icoll_of_init(struct device_node *np,
97 struct device_node *interrupt_parent) 192 struct device_node *interrupt_parent)
98{ 193{
99 icoll_base = of_iomap(np, 0); 194 void __iomem *icoll_base;
100 WARN_ON(!icoll_base); 195
196 icoll_priv.type = ICOLL;
197
198 icoll_base = icoll_init_iobase(np);
199 icoll_priv.vector = icoll_base + HW_ICOLL_VECTOR;
200 icoll_priv.levelack = icoll_base + HW_ICOLL_LEVELACK;
201 icoll_priv.ctrl = icoll_base + HW_ICOLL_CTRL;
202 icoll_priv.stat = icoll_base + HW_ICOLL_STAT_OFFSET;
203 icoll_priv.intr = icoll_base + HW_ICOLL_INTERRUPT0;
204 icoll_priv.clear = NULL;
101 205
102 /* 206 /*
103 * Interrupt Collector reset, which initializes the priority 207 * Interrupt Collector reset, which initializes the priority
104 * for each irq to level 0. 208 * for each irq to level 0.
105 */ 209 */
106 stmp_reset_block(icoll_base + HW_ICOLL_CTRL); 210 stmp_reset_block(icoll_priv.ctrl);
107 211
108 icoll_domain = irq_domain_add_linear(np, ICOLL_NUM_IRQS, 212 icoll_add_domain(np, ICOLL_NUM_IRQS);
109 &icoll_irq_domain_ops, NULL); 213
110 return icoll_domain ? 0 : -ENODEV; 214 return 0;
111} 215}
112IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init); 216IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);
217
218static int __init asm9260_of_init(struct device_node *np,
219 struct device_node *interrupt_parent)
220{
221 void __iomem *icoll_base;
222 int i;
223
224 icoll_priv.type = ASM9260_ICOLL;
225
226 icoll_base = icoll_init_iobase(np);
227 icoll_priv.vector = icoll_base + ASM9260_HW_ICOLL_VECTOR;
228 icoll_priv.levelack = icoll_base + ASM9260_HW_ICOLL_LEVELACK;
229 icoll_priv.ctrl = icoll_base + ASM9260_HW_ICOLL_CTRL;
230 icoll_priv.stat = icoll_base + ASM9260_HW_ICOLL_STAT_OFFSET;
231 icoll_priv.intr = icoll_base + ASM9260_HW_ICOLL_INTERRUPT0;
232 icoll_priv.clear = icoll_base + ASM9260_HW_ICOLL_CLEAR0;
233
234 writel_relaxed(ASM9260_BM_CTRL_IRQ_ENABLE,
235 icoll_priv.ctrl);
236 /*
 237 * The ASM9260 doesn't provide a reset bit, so we need to set level 0
 238 * manually.
239 */
240 for (i = 0; i < 16 * 0x10; i += 0x10)
241 writel(0, icoll_priv.intr + i);
242
243 icoll_add_domain(np, ASM9260_NUM_IRQS);
244
245 return 0;
246}
247IRQCHIP_DECLARE(asm9260, "alphascale,asm9260-icoll", asm9260_of_init);
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index a878b8d03868..b1777104fd9f 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -48,16 +48,26 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
48 handle_IRQ(irq, regs); 48 handle_IRQ(irq, regs);
49} 49}
50 50
51static int nvic_irq_domain_translate(struct irq_domain *d,
52 struct irq_fwspec *fwspec,
53 unsigned long *hwirq, unsigned int *type)
54{
55 if (WARN_ON(fwspec->param_count < 1))
56 return -EINVAL;
57 *hwirq = fwspec->param[0];
58 *type = IRQ_TYPE_NONE;
59 return 0;
60}
61
51static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 62static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
52 unsigned int nr_irqs, void *arg) 63 unsigned int nr_irqs, void *arg)
53{ 64{
54 int i, ret; 65 int i, ret;
55 irq_hw_number_t hwirq; 66 irq_hw_number_t hwirq;
56 unsigned int type = IRQ_TYPE_NONE; 67 unsigned int type = IRQ_TYPE_NONE;
57 struct of_phandle_args *irq_data = arg; 68 struct irq_fwspec *fwspec = arg;
58 69
59 ret = irq_domain_xlate_onecell(domain, irq_data->np, irq_data->args, 70 ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
60 irq_data->args_count, &hwirq, &type);
61 if (ret) 71 if (ret)
62 return ret; 72 return ret;
63 73
@@ -68,7 +78,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
68} 78}
69 79
70static const struct irq_domain_ops nvic_irq_domain_ops = { 80static const struct irq_domain_ops nvic_irq_domain_ops = {
71 .xlate = irq_domain_xlate_onecell, 81 .translate = nvic_irq_domain_translate,
72 .alloc = nvic_irq_domain_alloc, 82 .alloc = nvic_irq_domain_alloc,
73 .free = irq_domain_free_irqs_top, 83 .free = irq_domain_free_irqs_top,
74}; 84};
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 9525335723f6..c325806561be 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -361,14 +361,16 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
361 .xlate = irq_domain_xlate_twocell, 361 .xlate = irq_domain_xlate_twocell,
362}; 362};
363 363
364static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a7779 = { 364static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a777x = {
365 .irlm_bit = 23, /* ICR0.IRLM0 */ 365 .irlm_bit = 23, /* ICR0.IRLM0 */
366}; 366};
367 367
368static const struct of_device_id intc_irqpin_dt_ids[] = { 368static const struct of_device_id intc_irqpin_dt_ids[] = {
369 { .compatible = "renesas,intc-irqpin", }, 369 { .compatible = "renesas,intc-irqpin", },
370 { .compatible = "renesas,intc-irqpin-r8a7778",
371 .data = &intc_irqpin_irlm_r8a777x },
370 { .compatible = "renesas,intc-irqpin-r8a7779", 372 { .compatible = "renesas,intc-irqpin-r8a7779",
371 .data = &intc_irqpin_irlm_r8a7779 }, 373 .data = &intc_irqpin_irlm_r8a777x },
372 {}, 374 {},
373}; 375};
374MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); 376MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 35bf97ba4a3d..52304b139aa4 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -62,33 +62,20 @@ struct irqc_priv {
62 struct irqc_irq irq[IRQC_IRQ_MAX]; 62 struct irqc_irq irq[IRQC_IRQ_MAX];
63 unsigned int number_of_irqs; 63 unsigned int number_of_irqs;
64 struct platform_device *pdev; 64 struct platform_device *pdev;
65 struct irq_chip irq_chip; 65 struct irq_chip_generic *gc;
66 struct irq_domain *irq_domain; 66 struct irq_domain *irq_domain;
67 struct clk *clk; 67 struct clk *clk;
68}; 68};
69 69
70static void irqc_dbg(struct irqc_irq *i, char *str) 70static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
71{
72 dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
73 str, i->requested_irq, i->hw_irq);
74}
75
76static void irqc_irq_enable(struct irq_data *d)
77{ 71{
78 struct irqc_priv *p = irq_data_get_irq_chip_data(d); 72 return data->domain->host_data;
79 int hw_irq = irqd_to_hwirq(d);
80
81 irqc_dbg(&p->irq[hw_irq], "enable");
82 iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_SET);
83} 73}
84 74
85static void irqc_irq_disable(struct irq_data *d) 75static void irqc_dbg(struct irqc_irq *i, char *str)
86{ 76{
87 struct irqc_priv *p = irq_data_get_irq_chip_data(d); 77 dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
88 int hw_irq = irqd_to_hwirq(d); 78 str, i->requested_irq, i->hw_irq);
89
90 irqc_dbg(&p->irq[hw_irq], "disable");
91 iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS);
92} 79}
93 80
94static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { 81static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
@@ -101,7 +88,7 @@ static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
101 88
102static int irqc_irq_set_type(struct irq_data *d, unsigned int type) 89static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
103{ 90{
104 struct irqc_priv *p = irq_data_get_irq_chip_data(d); 91 struct irqc_priv *p = irq_data_to_priv(d);
105 int hw_irq = irqd_to_hwirq(d); 92 int hw_irq = irqd_to_hwirq(d);
106 unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK]; 93 unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
107 u32 tmp; 94 u32 tmp;
@@ -120,7 +107,7 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
120 107
121static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) 108static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
122{ 109{
123 struct irqc_priv *p = irq_data_get_irq_chip_data(d); 110 struct irqc_priv *p = irq_data_to_priv(d);
124 int hw_irq = irqd_to_hwirq(d); 111 int hw_irq = irqd_to_hwirq(d);
125 112
126 irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); 113 irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
@@ -153,35 +140,11 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
153 return IRQ_NONE; 140 return IRQ_NONE;
154} 141}
155 142
156/*
157 * This lock class tells lockdep that IRQC irqs are in a different
158 * category than their parents, so it won't report false recursion.
159 */
160static struct lock_class_key irqc_irq_lock_class;
161
162static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
163 irq_hw_number_t hw)
164{
165 struct irqc_priv *p = h->host_data;
166
167 irqc_dbg(&p->irq[hw], "map");
168 irq_set_chip_data(virq, h->host_data);
169 irq_set_lockdep_class(virq, &irqc_irq_lock_class);
170 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
171 return 0;
172}
173
174static const struct irq_domain_ops irqc_irq_domain_ops = {
175 .map = irqc_irq_domain_map,
176 .xlate = irq_domain_xlate_twocell,
177};
178
179static int irqc_probe(struct platform_device *pdev) 143static int irqc_probe(struct platform_device *pdev)
180{ 144{
181 struct irqc_priv *p; 145 struct irqc_priv *p;
182 struct resource *io; 146 struct resource *io;
183 struct resource *irq; 147 struct resource *irq;
184 struct irq_chip *irq_chip;
185 const char *name = dev_name(&pdev->dev); 148 const char *name = dev_name(&pdev->dev);
186 int ret; 149 int ret;
187 int k; 150 int k;
@@ -241,40 +204,51 @@ static int irqc_probe(struct platform_device *pdev)
241 204
242 p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ 205 p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
243 206
244 irq_chip = &p->irq_chip;
245 irq_chip->name = name;
246 irq_chip->irq_mask = irqc_irq_disable;
247 irq_chip->irq_unmask = irqc_irq_enable;
248 irq_chip->irq_set_type = irqc_irq_set_type;
249 irq_chip->irq_set_wake = irqc_irq_set_wake;
250 irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
251
252 p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 207 p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
253 p->number_of_irqs, 208 p->number_of_irqs,
254 &irqc_irq_domain_ops, p); 209 &irq_generic_chip_ops, p);
255 if (!p->irq_domain) { 210 if (!p->irq_domain) {
256 ret = -ENXIO; 211 ret = -ENXIO;
257 dev_err(&pdev->dev, "cannot initialize irq domain\n"); 212 dev_err(&pdev->dev, "cannot initialize irq domain\n");
258 goto err2; 213 goto err2;
259 } 214 }
260 215
216 ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
217 1, name, handle_level_irq,
218 0, 0, IRQ_GC_INIT_NESTED_LOCK);
219 if (ret) {
220 dev_err(&pdev->dev, "cannot allocate generic chip\n");
221 goto err3;
222 }
223
224 p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
225 p->gc->reg_base = p->cpu_int_base;
226 p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
227 p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
228 p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
229 p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
230 p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
231 p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
232 p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
233
261 /* request interrupts one by one */ 234 /* request interrupts one by one */
262 for (k = 0; k < p->number_of_irqs; k++) { 235 for (k = 0; k < p->number_of_irqs; k++) {
263 if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, 236 if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
264 0, name, &p->irq[k])) { 237 0, name, &p->irq[k])) {
265 dev_err(&pdev->dev, "failed to request IRQ\n"); 238 dev_err(&pdev->dev, "failed to request IRQ\n");
266 ret = -ENOENT; 239 ret = -ENOENT;
267 goto err3; 240 goto err4;
268 } 241 }
269 } 242 }
270 243
271 dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); 244 dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
272 245
273 return 0; 246 return 0;
274err3: 247err4:
275 while (--k >= 0) 248 while (--k >= 0)
276 free_irq(p->irq[k].requested_irq, &p->irq[k]); 249 free_irq(p->irq[k].requested_irq, &p->irq[k]);
277 250
251err3:
278 irq_domain_remove(p->irq_domain); 252 irq_domain_remove(p->irq_domain);
279err2: 253err2:
280 iounmap(p->iomem); 254 iounmap(p->iomem);
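After this conversion, masking and unmasking go through the generic irq chip helpers instead of the removed iowrite32() callbacks: irq_gc_mask_disable_reg() and irq_gc_unmask_enable_reg() write the cached per-IRQ mask bit to the disable/enable offsets configured above. Roughly, and leaving out the mask-cache bookkeeping (a simplified sketch, not the kernel's exact helper):

#include <linux/irq.h>

static void example_unmask(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);

        irq_gc_lock(gc);
        irq_reg_writel(gc, d->mask, ct->regs.enable);   /* IRQC_EN_SET here */
        irq_gc_unlock(gc);
}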
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index 7154b011ddd2..c71914e8f596 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -311,7 +311,7 @@ static void s3c_irq_demux(struct irq_desc *desc)
311 * and one big domain for the dt case where the subintc 311 * and one big domain for the dt case where the subintc
312 * starts at hwirq number 32. 312 * starts at hwirq number 32.
313 */ 313 */
314 offset = (intc->domain->of_node) ? 32 : 0; 314 offset = irq_domain_get_of_node(intc->domain) ? 32 : 0;
315 315
316 chained_irq_enter(chip, desc); 316 chained_irq_enter(chip, desc);
317 317
@@ -342,7 +342,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
342 return false; 342 return false;
343 343
344 /* non-dt machines use individual domains */ 344 /* non-dt machines use individual domains */
345 if (!intc->domain->of_node) 345 if (!irq_domain_get_of_node(intc->domain))
346 intc_offset = 0; 346 intc_offset = 0;
347 347
348 /* We have a problem that the INTOFFSET register does not always 348 /* We have a problem that the INTOFFSET register does not always
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index c143dd58410c..4ef178078e5b 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -8,6 +8,9 @@
8 * warranty of any kind, whether express or implied. 8 * warranty of any kind, whether express or implied.
9 */ 9 */
10 10
11#define DRV_NAME "sunxi-nmi"
12#define pr_fmt(fmt) DRV_NAME ": " fmt
13
11#include <linux/bitops.h> 14#include <linux/bitops.h>
12#include <linux/device.h> 15#include <linux/device.h>
13#include <linux/io.h> 16#include <linux/io.h>
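Defining pr_fmt before any header that pulls in printk.h makes every pr_err()/pr_warn() in this file carry the driver prefix automatically, which is why the messages changed below can drop their hand-rolled "%s:" prefixes. A minimal sketch of the mechanism:

#define pr_fmt(fmt) "sunxi-nmi: " fmt   /* must precede the printk.h include */
#include <linux/printk.h>

static void example(void)
{
        pr_err("unable to parse irq\n");        /* logs "sunxi-nmi: unable to parse irq" */
}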
@@ -96,8 +99,8 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
96 break; 99 break;
97 default: 100 default:
98 irq_gc_unlock(gc); 101 irq_gc_unlock(gc);
99 pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", 102 pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
100 __func__, data->irq); 103 data->irq);
101 return -EBADR; 104 return -EBADR;
102 } 105 }
103 106
@@ -130,30 +133,29 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
130 133
131 domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); 134 domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
132 if (!domain) { 135 if (!domain) {
133 pr_err("%s: Could not register interrupt domain.\n", node->name); 136 pr_err("Could not register interrupt domain.\n");
134 return -ENOMEM; 137 return -ENOMEM;
135 } 138 }
136 139
137 ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name, 140 ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
138 handle_fasteoi_irq, clr, 0, 141 handle_fasteoi_irq, clr, 0,
139 IRQ_GC_INIT_MASK_CACHE); 142 IRQ_GC_INIT_MASK_CACHE);
140 if (ret) { 143 if (ret) {
141 pr_err("%s: Could not allocate generic interrupt chip.\n", 144 pr_err("Could not allocate generic interrupt chip.\n");
142 node->name); 145 goto fail_irqd_remove;
143 goto fail_irqd_remove;
144 } 146 }
145 147
146 irq = irq_of_parse_and_map(node, 0); 148 irq = irq_of_parse_and_map(node, 0);
147 if (irq <= 0) { 149 if (irq <= 0) {
148 pr_err("%s: unable to parse irq\n", node->name); 150 pr_err("unable to parse irq\n");
149 ret = -EINVAL; 151 ret = -EINVAL;
150 goto fail_irqd_remove; 152 goto fail_irqd_remove;
151 } 153 }
152 154
153 gc = irq_get_domain_generic_chip(domain, 0); 155 gc = irq_get_domain_generic_chip(domain, 0);
154 gc->reg_base = of_iomap(node, 0); 156 gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
155 if (!gc->reg_base) { 157 if (!gc->reg_base) {
156 pr_err("%s: unable to map resource\n", node->name); 158 pr_err("unable to map resource\n");
157 ret = -ENOMEM; 159 ret = -ENOMEM;
158 goto fail_irqd_remove; 160 goto fail_irqd_remove;
159 } 161 }
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 2fd89eb88f3a..121ec301372e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -214,47 +214,50 @@ static struct irq_chip tegra_ictlr_chip = {
214 .irq_unmask = tegra_unmask, 214 .irq_unmask = tegra_unmask,
215 .irq_retrigger = tegra_retrigger, 215 .irq_retrigger = tegra_retrigger,
216 .irq_set_wake = tegra_set_wake, 216 .irq_set_wake = tegra_set_wake,
217 .irq_set_type = irq_chip_set_type_parent,
217 .flags = IRQCHIP_MASK_ON_SUSPEND, 218 .flags = IRQCHIP_MASK_ON_SUSPEND,
218#ifdef CONFIG_SMP 219#ifdef CONFIG_SMP
219 .irq_set_affinity = irq_chip_set_affinity_parent, 220 .irq_set_affinity = irq_chip_set_affinity_parent,
220#endif 221#endif
221}; 222};
222 223
223static int tegra_ictlr_domain_xlate(struct irq_domain *domain, 224static int tegra_ictlr_domain_translate(struct irq_domain *d,
224 struct device_node *controller, 225 struct irq_fwspec *fwspec,
225 const u32 *intspec, 226 unsigned long *hwirq,
226 unsigned int intsize, 227 unsigned int *type)
227 unsigned long *out_hwirq,
228 unsigned int *out_type)
229{ 228{
230 if (domain->of_node != controller) 229 if (is_of_node(fwspec->fwnode)) {
231 return -EINVAL; /* Shouldn't happen, really... */ 230 if (fwspec->param_count != 3)
232 if (intsize != 3) 231 return -EINVAL;
233 return -EINVAL; /* Not GIC compliant */
234 if (intspec[0] != GIC_SPI)
235 return -EINVAL; /* No PPI should point to this domain */
236 232
237 *out_hwirq = intspec[1]; 233 /* No PPI should point to this domain */
238 *out_type = intspec[2]; 234 if (fwspec->param[0] != 0)
239 return 0; 235 return -EINVAL;
236
237 *hwirq = fwspec->param[1];
238 *type = fwspec->param[2];
239 return 0;
240 }
241
242 return -EINVAL;
240} 243}
241 244
242static int tegra_ictlr_domain_alloc(struct irq_domain *domain, 245static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
243 unsigned int virq, 246 unsigned int virq,
244 unsigned int nr_irqs, void *data) 247 unsigned int nr_irqs, void *data)
245{ 248{
246 struct of_phandle_args *args = data; 249 struct irq_fwspec *fwspec = data;
247 struct of_phandle_args parent_args; 250 struct irq_fwspec parent_fwspec;
248 struct tegra_ictlr_info *info = domain->host_data; 251 struct tegra_ictlr_info *info = domain->host_data;
249 irq_hw_number_t hwirq; 252 irq_hw_number_t hwirq;
250 unsigned int i; 253 unsigned int i;
251 254
252 if (args->args_count != 3) 255 if (fwspec->param_count != 3)
253 return -EINVAL; /* Not GIC compliant */ 256 return -EINVAL; /* Not GIC compliant */
254 if (args->args[0] != GIC_SPI) 257 if (fwspec->param[0] != GIC_SPI)
255 return -EINVAL; /* No PPI should point to this domain */ 258 return -EINVAL; /* No PPI should point to this domain */
256 259
257 hwirq = args->args[1]; 260 hwirq = fwspec->param[1];
258 if (hwirq >= (num_ictlrs * 32)) 261 if (hwirq >= (num_ictlrs * 32))
259 return -EINVAL; 262 return -EINVAL;
260 263
@@ -266,9 +269,10 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
266 info->base[ictlr]); 269 info->base[ictlr]);
267 } 270 }
268 271
269 parent_args = *args; 272 parent_fwspec = *fwspec;
270 parent_args.np = domain->parent->of_node; 273 parent_fwspec.fwnode = domain->parent->fwnode;
271 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); 274 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
275 &parent_fwspec);
272} 276}
273 277
274static void tegra_ictlr_domain_free(struct irq_domain *domain, 278static void tegra_ictlr_domain_free(struct irq_domain *domain,
@@ -284,9 +288,9 @@ static void tegra_ictlr_domain_free(struct irq_domain *domain,
284} 288}
285 289
286static const struct irq_domain_ops tegra_ictlr_domain_ops = { 290static const struct irq_domain_ops tegra_ictlr_domain_ops = {
287 .xlate = tegra_ictlr_domain_xlate, 291 .translate = tegra_ictlr_domain_translate,
288 .alloc = tegra_ictlr_domain_alloc, 292 .alloc = tegra_ictlr_domain_alloc,
289 .free = tegra_ictlr_domain_free, 293 .free = tegra_ictlr_domain_free,
290}; 294};
291 295
292static int __init tegra_ictlr_init(struct device_node *node, 296static int __init tegra_ictlr_init(struct device_node *node,
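Note: the tegra conversion is representative of the whole irqchip part of this series: the OF-specific .xlate callback (device_node plus intspec array) becomes a .translate callback operating on struct irq_fwspec, which bundles a fwnode handle and the cell values. A minimal sketch of such a callback for a three-cell, GIC-style binding, assuming only DT users (names hypothetical):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (!is_of_node(fwspec->fwnode))
		return -EINVAL;

	if (fwspec->param_count != 3)
		return -EINVAL;

	/* First cell selects SPI (0) vs PPI; only SPIs are accepted here. */
	if (fwspec->param[0] != 0)
		return -EINVAL;

	*hwirq = fwspec->param[1];
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

static const struct irq_domain_ops example_domain_ops = {
	.translate = example_domain_translate,
	/* .alloc / .free as in the drivers above */
};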
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c
index 2c2255886401..56b5e3cb9de2 100644
--- a/drivers/irqchip/irq-vf610-mscm-ir.c
+++ b/drivers/irqchip/irq-vf610-mscm-ir.c
@@ -130,35 +130,51 @@ static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int vi
130{ 130{
131 int i; 131 int i;
132 irq_hw_number_t hwirq; 132 irq_hw_number_t hwirq;
133 struct of_phandle_args *irq_data = arg; 133 struct irq_fwspec *fwspec = arg;
134 struct of_phandle_args gic_data; 134 struct irq_fwspec parent_fwspec;
135 135
136 if (irq_data->args_count != 2) 136 if (!irq_domain_get_of_node(domain->parent))
137 return -EINVAL; 137 return -EINVAL;
138 138
139 hwirq = irq_data->args[0]; 139 if (fwspec->param_count != 2)
140 return -EINVAL;
141
142 hwirq = fwspec->param[0];
140 for (i = 0; i < nr_irqs; i++) 143 for (i = 0; i < nr_irqs; i++)
141 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 144 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
142 &vf610_mscm_ir_irq_chip, 145 &vf610_mscm_ir_irq_chip,
143 domain->host_data); 146 domain->host_data);
144 147
145 gic_data.np = domain->parent->of_node; 148 parent_fwspec.fwnode = domain->parent->fwnode;
146 149
147 if (mscm_ir_data->is_nvic) { 150 if (mscm_ir_data->is_nvic) {
148 gic_data.args_count = 1; 151 parent_fwspec.param_count = 1;
149 gic_data.args[0] = irq_data->args[0]; 152 parent_fwspec.param[0] = fwspec->param[0];
150 } else { 153 } else {
151 gic_data.args_count = 3; 154 parent_fwspec.param_count = 3;
152 gic_data.args[0] = GIC_SPI; 155 parent_fwspec.param[0] = GIC_SPI;
153 gic_data.args[1] = irq_data->args[0]; 156 parent_fwspec.param[1] = fwspec->param[0];
154 gic_data.args[2] = irq_data->args[1]; 157 parent_fwspec.param[2] = fwspec->param[1];
155 } 158 }
156 159
157 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); 160 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
161 &parent_fwspec);
162}
163
164static int vf610_mscm_ir_domain_translate(struct irq_domain *d,
165 struct irq_fwspec *fwspec,
166 unsigned long *hwirq,
167 unsigned int *type)
168{
169 if (WARN_ON(fwspec->param_count < 2))
170 return -EINVAL;
171 *hwirq = fwspec->param[0];
172 *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
173 return 0;
158} 174}
159 175
160static const struct irq_domain_ops mscm_irq_domain_ops = { 176static const struct irq_domain_ops mscm_irq_domain_ops = {
161 .xlate = irq_domain_xlate_twocell, 177 .translate = vf610_mscm_ir_domain_translate,
162 .alloc = vf610_mscm_ir_domain_alloc, 178 .alloc = vf610_mscm_ir_domain_alloc,
163 .free = irq_domain_free_irqs_common, 179 .free = irq_domain_free_irqs_common,
164}; 180};
@@ -205,7 +221,8 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node,
205 goto out_unmap; 221 goto out_unmap;
206 } 222 }
207 223
208 if (of_device_is_compatible(domain->parent->of_node, "arm,armv7m-nvic")) 224 if (of_device_is_compatible(irq_domain_get_of_node(domain->parent),
225 "arm,armv7m-nvic"))
209 mscm_ir_data->is_nvic = true; 226 mscm_ir_data->is_nvic = true;
210 227
211 cpu_pm_register_notifier(&mscm_ir_notifier_block); 228 cpu_pm_register_notifier(&mscm_ir_notifier_block);
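Note: stacked domains now rebuild a parent irq_fwspec rather than an of_phandle_args when forwarding an allocation, as both the tegra and vf610 hunks show. A condensed sketch of that forwarding step, re-encoding a two-cell request into the parent GIC's three-cell format (structure and names hypothetical):

#include <linux/irqdomain.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>

static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;

	if (fwspec->param_count != 2)
		return -EINVAL;

	/* ... irq_domain_set_hwirq_and_chip() for virq..virq+nr_irqs-1 ... */

	/* Re-encode the request in the parent's (GIC) format. */
	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 3;
	parent_fwspec.param[0] = GIC_SPI;
	parent_fwspec.param[1] = fwspec->param[0];
	parent_fwspec.param[2] = fwspec->param[1];

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}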
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 18accb0a79cc..c53a53f6efb6 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,7 +1247,7 @@ static void
1247l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) 1247l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1248{ 1248{
1249 struct PStack *st = fi->userdata; 1249 struct PStack *st = fi->userdata;
1250 struct sk_buff *skb; 1250 struct sk_buff *skb, *nskb;
1251 struct Layer2 *l2 = &st->l2; 1251 struct Layer2 *l2 = &st->l2;
1252 u_char header[MAX_HEADER_LEN]; 1252 u_char header[MAX_HEADER_LEN];
1253 int i, hdr_space_needed; 1253 int i, hdr_space_needed;
@@ -1262,14 +1262,10 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1262 return; 1262 return;
1263 1263
1264 hdr_space_needed = l2headersize(l2, 0); 1264 hdr_space_needed = l2headersize(l2, 0);
1265 if (hdr_space_needed > skb_headroom(skb)) { 1265 nskb = skb_realloc_headroom(skb, hdr_space_needed);
1266 struct sk_buff *orig_skb = skb; 1266 if (!nskb) {
1267 1267 skb_queue_head(&l2->i_queue, skb);
1268 skb = skb_realloc_headroom(skb, hdr_space_needed); 1268 return;
1269 if (!skb) {
1270 dev_kfree_skb(orig_skb);
1271 return;
1272 }
1273 } 1269 }
1274 spin_lock_irqsave(&l2->lock, flags); 1270 spin_lock_irqsave(&l2->lock, flags);
1275 if (test_bit(FLG_MOD128, &l2->flag)) 1271 if (test_bit(FLG_MOD128, &l2->flag))
@@ -1282,7 +1278,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1282 p1); 1278 p1);
1283 dev_kfree_skb(l2->windowar[p1]); 1279 dev_kfree_skb(l2->windowar[p1]);
1284 } 1280 }
1285 l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC); 1281 l2->windowar[p1] = skb;
1286 1282
1287 i = sethdraddr(&st->l2, header, CMD); 1283 i = sethdraddr(&st->l2, header, CMD);
1288 1284
@@ -1295,8 +1291,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1295 l2->vs = (l2->vs + 1) % 8; 1291 l2->vs = (l2->vs + 1) % 8;
1296 } 1292 }
1297 spin_unlock_irqrestore(&l2->lock, flags); 1293 spin_unlock_irqrestore(&l2->lock, flags);
1298 memcpy(skb_push(skb, i), header, i); 1294 memcpy(skb_push(nskb, i), header, i);
1299 st->l2.l2l1(st, PH_PULL | INDICATION, skb); 1295 st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
1300 test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); 1296 test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
1301 if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { 1297 if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
1302 FsmDelTimer(&st->l2.t203, 13); 1298 FsmDelTimer(&st->l2.t203, 13);
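Note: the isdnl2 change drops the "enough headroom" special case: the I-frame is always copied with skb_realloc_headroom(), the original skb stays in the ack window, and on allocation failure the frame is requeued rather than dropped. A minimal sketch of that error-handling shape, with the queue, window slot, and transmit hook as placeholders:

#include <linux/skbuff.h>
#include <linux/string.h>

static void example_send(struct sk_buff *skb)
{
	/* stand-in for the real PH_PULL/PH_DATA transmit path */
	dev_kfree_skb(skb);
}

static void example_pull_queue(struct sk_buff_head *queue,
			       struct sk_buff **window_slot,
			       const u8 *header, int hdr_len)
{
	struct sk_buff *skb, *nskb;

	skb = skb_dequeue(queue);
	if (!skb)
		return;

	nskb = skb_realloc_headroom(skb, hdr_len);
	if (!nskb) {
		/* No memory: put the frame back instead of losing it. */
		skb_queue_head(queue, skb);
		return;
	}

	/* The original is kept for retransmission ... */
	*window_slot = skb;
	/* ... and the headroom-adjusted copy carries the L2 header. */
	memcpy(skb_push(nskb, hdr_len), header, hdr_len);
	example_send(nskb);
}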
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 949cabb88f1c..5eb380a25903 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -1476,7 +1476,7 @@ static void
1476l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) 1476l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1477{ 1477{
1478 struct layer2 *l2 = fi->userdata; 1478 struct layer2 *l2 = fi->userdata;
1479 struct sk_buff *skb, *nskb, *oskb; 1479 struct sk_buff *skb, *nskb;
1480 u_char header[MAX_L2HEADER_LEN]; 1480 u_char header[MAX_L2HEADER_LEN];
1481 u_int i, p1; 1481 u_int i, p1;
1482 1482
@@ -1486,48 +1486,34 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1486 skb = skb_dequeue(&l2->i_queue); 1486 skb = skb_dequeue(&l2->i_queue);
1487 if (!skb) 1487 if (!skb)
1488 return; 1488 return;
1489
1490 if (test_bit(FLG_MOD128, &l2->flag))
1491 p1 = (l2->vs - l2->va) % 128;
1492 else
1493 p1 = (l2->vs - l2->va) % 8;
1494 p1 = (p1 + l2->sow) % l2->window;
1495 if (l2->windowar[p1]) {
1496 printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
1497 mISDNDevName4ch(&l2->ch), p1);
1498 dev_kfree_skb(l2->windowar[p1]);
1499 }
1500 l2->windowar[p1] = skb;
1501 i = sethdraddr(l2, header, CMD); 1489 i = sethdraddr(l2, header, CMD);
1502 if (test_bit(FLG_MOD128, &l2->flag)) { 1490 if (test_bit(FLG_MOD128, &l2->flag)) {
1503 header[i++] = l2->vs << 1; 1491 header[i++] = l2->vs << 1;
1504 header[i++] = l2->vr << 1; 1492 header[i++] = l2->vr << 1;
1493 } else
1494 header[i++] = (l2->vr << 5) | (l2->vs << 1);
1495 nskb = skb_realloc_headroom(skb, i);
1496 if (!nskb) {
1497 printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
1498 mISDNDevName4ch(&l2->ch), i);
1499 skb_queue_head(&l2->i_queue, skb);
1500 return;
1501 }
1502 if (test_bit(FLG_MOD128, &l2->flag)) {
1503 p1 = (l2->vs - l2->va) % 128;
1505 l2->vs = (l2->vs + 1) % 128; 1504 l2->vs = (l2->vs + 1) % 128;
1506 } else { 1505 } else {
1507 header[i++] = (l2->vr << 5) | (l2->vs << 1); 1506 p1 = (l2->vs - l2->va) % 8;
1508 l2->vs = (l2->vs + 1) % 8; 1507 l2->vs = (l2->vs + 1) % 8;
1509 } 1508 }
1510 1509 p1 = (p1 + l2->sow) % l2->window;
1511 nskb = skb_clone(skb, GFP_ATOMIC); 1510 if (l2->windowar[p1]) {
1512 p1 = skb_headroom(nskb); 1511 printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
1513 if (p1 >= i) 1512 mISDNDevName4ch(&l2->ch), p1);
1514 memcpy(skb_push(nskb, i), header, i); 1513 dev_kfree_skb(l2->windowar[p1]);
1515 else {
1516 printk(KERN_WARNING
1517 "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
1518 mISDNDevName4ch(&l2->ch), i, p1);
1519 oskb = nskb;
1520 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1521 if (!nskb) {
1522 dev_kfree_skb(oskb);
1523 printk(KERN_WARNING "%s: no skb mem in %s\n",
1524 mISDNDevName4ch(&l2->ch), __func__);
1525 return;
1526 }
1527 memcpy(skb_put(nskb, i), header, i);
1528 memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
1529 dev_kfree_skb(oskb);
1530 } 1514 }
1515 l2->windowar[p1] = skb;
1516 memcpy(skb_push(nskb, i), header, i);
1531 l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb); 1517 l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
1532 test_and_clear_bit(FLG_ACK_PEND, &l2->flag); 1518 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1533 if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) { 1519 if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 42990f2d0317..b1ab8bdf8251 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -556,6 +556,16 @@ config LEDS_KTD2692
556 556
557 Say Y to enable this driver. 557 Say Y to enable this driver.
558 558
559config LEDS_SEAD3
560 tristate "LED support for the MIPS SEAD 3 board"
561 depends on LEDS_CLASS && MIPS_SEAD3
562 help
563 Say Y here to include support for the FLED and PLED LEDs on SEAD3 eval
564 boards.
565
566 This driver can also be built as a module. If so the module
567 will be called leds-sead3.
568
559comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)" 569comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
560 570
561config LEDS_BLINKM 571config LEDS_BLINKM
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index b503f92dc2c4..e9d53092765d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
65obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o 65obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
66obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o 66obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
67obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o 67obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
68obj-$(CONFIG_LEDS_SEAD3) += leds-sead3.o
68 69
69# LED SPI Drivers 70# LED SPI Drivers
70obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o 71obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index ca51d58bed24..7385f98dd54b 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -102,70 +102,6 @@ static const struct attribute_group *led_groups[] = {
102 NULL, 102 NULL,
103}; 103};
104 104
105static void led_timer_function(unsigned long data)
106{
107 struct led_classdev *led_cdev = (void *)data;
108 unsigned long brightness;
109 unsigned long delay;
110
111 if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
112 led_set_brightness_async(led_cdev, LED_OFF);
113 return;
114 }
115
116 if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
117 led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
118 return;
119 }
120
121 brightness = led_get_brightness(led_cdev);
122 if (!brightness) {
123 /* Time to switch the LED on. */
124 if (led_cdev->delayed_set_value) {
125 led_cdev->blink_brightness =
126 led_cdev->delayed_set_value;
127 led_cdev->delayed_set_value = 0;
128 }
129 brightness = led_cdev->blink_brightness;
130 delay = led_cdev->blink_delay_on;
131 } else {
132 /* Store the current brightness value to be able
133 * to restore it when the delay_off period is over.
134 */
135 led_cdev->blink_brightness = brightness;
136 brightness = LED_OFF;
137 delay = led_cdev->blink_delay_off;
138 }
139
140 led_set_brightness_async(led_cdev, brightness);
141
142 /* Return in next iteration if led is in one-shot mode and we are in
143 * the final blink state so that the led is toggled each delay_on +
144 * delay_off milliseconds in worst case.
145 */
146 if (led_cdev->flags & LED_BLINK_ONESHOT) {
147 if (led_cdev->flags & LED_BLINK_INVERT) {
148 if (brightness)
149 led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
150 } else {
151 if (!brightness)
152 led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
153 }
154 }
155
156 mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
157}
158
159static void set_brightness_delayed(struct work_struct *ws)
160{
161 struct led_classdev *led_cdev =
162 container_of(ws, struct led_classdev, set_brightness_work);
163
164 led_stop_software_blink(led_cdev);
165
166 led_set_brightness_async(led_cdev, led_cdev->delayed_set_value);
167}
168
169/** 105/**
170 * led_classdev_suspend - suspend an led_classdev. 106 * led_classdev_suspend - suspend an led_classdev.
171 * @led_cdev: the led_classdev to suspend. 107 * @led_cdev: the led_classdev to suspend.
@@ -283,10 +219,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
283 219
284 led_update_brightness(led_cdev); 220 led_update_brightness(led_cdev);
285 221
286 INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed); 222 led_init_core(led_cdev);
287
288 setup_timer(&led_cdev->blink_timer, led_timer_function,
289 (unsigned long)led_cdev);
290 223
291#ifdef CONFIG_LEDS_TRIGGERS 224#ifdef CONFIG_LEDS_TRIGGERS
292 led_trigger_set_default(led_cdev); 225 led_trigger_set_default(led_cdev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 549de7e24cfd..c1c3af089634 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -25,6 +25,70 @@ EXPORT_SYMBOL_GPL(leds_list_lock);
25LIST_HEAD(leds_list); 25LIST_HEAD(leds_list);
26EXPORT_SYMBOL_GPL(leds_list); 26EXPORT_SYMBOL_GPL(leds_list);
27 27
28static void led_timer_function(unsigned long data)
29{
30 struct led_classdev *led_cdev = (void *)data;
31 unsigned long brightness;
32 unsigned long delay;
33
34 if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
35 led_set_brightness_async(led_cdev, LED_OFF);
36 return;
37 }
38
39 if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
40 led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
41 return;
42 }
43
44 brightness = led_get_brightness(led_cdev);
45 if (!brightness) {
46 /* Time to switch the LED on. */
47 if (led_cdev->delayed_set_value) {
48 led_cdev->blink_brightness =
49 led_cdev->delayed_set_value;
50 led_cdev->delayed_set_value = 0;
51 }
52 brightness = led_cdev->blink_brightness;
53 delay = led_cdev->blink_delay_on;
54 } else {
55 /* Store the current brightness value to be able
56 * to restore it when the delay_off period is over.
57 */
58 led_cdev->blink_brightness = brightness;
59 brightness = LED_OFF;
60 delay = led_cdev->blink_delay_off;
61 }
62
63 led_set_brightness_async(led_cdev, brightness);
64
65 /* Return in next iteration if led is in one-shot mode and we are in
66 * the final blink state so that the led is toggled each delay_on +
67 * delay_off milliseconds in worst case.
68 */
69 if (led_cdev->flags & LED_BLINK_ONESHOT) {
70 if (led_cdev->flags & LED_BLINK_INVERT) {
71 if (brightness)
72 led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
73 } else {
74 if (!brightness)
75 led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
76 }
77 }
78
79 mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
80}
81
82static void set_brightness_delayed(struct work_struct *ws)
83{
84 struct led_classdev *led_cdev =
85 container_of(ws, struct led_classdev, set_brightness_work);
86
87 led_stop_software_blink(led_cdev);
88
89 led_set_brightness_async(led_cdev, led_cdev->delayed_set_value);
90}
91
28static void led_set_software_blink(struct led_classdev *led_cdev, 92static void led_set_software_blink(struct led_classdev *led_cdev,
29 unsigned long delay_on, 93 unsigned long delay_on,
30 unsigned long delay_off) 94 unsigned long delay_off)
@@ -72,6 +136,15 @@ static void led_blink_setup(struct led_classdev *led_cdev,
72 led_set_software_blink(led_cdev, *delay_on, *delay_off); 136 led_set_software_blink(led_cdev, *delay_on, *delay_off);
73} 137}
74 138
139void led_init_core(struct led_classdev *led_cdev)
140{
141 INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
142
143 setup_timer(&led_cdev->blink_timer, led_timer_function,
144 (unsigned long)led_cdev);
145}
146EXPORT_SYMBOL_GPL(led_init_core);
147
75void led_blink_set(struct led_classdev *led_cdev, 148void led_blink_set(struct led_classdev *led_cdev,
76 unsigned long *delay_on, 149 unsigned long *delay_on,
77 unsigned long *delay_off) 150 unsigned long *delay_off)
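Note: with the blink timer and brightness work item moved into led-core and set up by led_init_core(), any registration path (not just led_classdev_register()) can rely on the software-blink machinery. A minimal sketch of a caller that exercises it, assuming a driver without hardware blink support (names hypothetical):

#include <linux/leds.h>

/* Ask the core for a 500 ms on / 500 ms off software blink; the
 * blink_timer initialised by led_init_core() does the toggling. */
static void example_start_blink(struct led_classdev *cdev)
{
	unsigned long delay_on = 500;
	unsigned long delay_off = 500;

	led_blink_set(cdev, &delay_on, &delay_off);
}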
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 1497a09166d6..7870840e7cc9 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -142,6 +142,7 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
142 of_property_read_u32(np, "marvell,88pm860x-iset", 142 of_property_read_u32(np, "marvell,88pm860x-iset",
143 &iset); 143 &iset);
144 data->iset = PM8606_LED_CURRENT(iset); 144 data->iset = PM8606_LED_CURRENT(iset);
145 of_node_put(np);
145 break; 146 break;
146 } 147 }
147 } 148 }
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 1793727bc9ae..c7ea5c626331 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -41,6 +41,11 @@
41#define BCM6328_SERIAL_LED_SHIFT_DIR BIT(16) 41#define BCM6328_SERIAL_LED_SHIFT_DIR BIT(16)
42#define BCM6328_LED_SHIFT_TEST BIT(30) 42#define BCM6328_LED_SHIFT_TEST BIT(30)
43#define BCM6328_LED_TEST BIT(31) 43#define BCM6328_LED_TEST BIT(31)
44#define BCM6328_INIT_MASK (BCM6328_SERIAL_LED_EN | \
45 BCM6328_SERIAL_LED_MUX | \
46 BCM6328_SERIAL_LED_CLK_NPOL | \
47 BCM6328_SERIAL_LED_DATA_PPOL | \
48 BCM6328_SERIAL_LED_SHIFT_DIR)
44 49
45#define BCM6328_LED_MODE_MASK 3 50#define BCM6328_LED_MODE_MASK 3
46#define BCM6328_LED_MODE_OFF 0 51#define BCM6328_LED_MODE_OFF 0
@@ -281,11 +286,10 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
281 "linux,default-trigger", 286 "linux,default-trigger",
282 NULL); 287 NULL);
283 288
289 spin_lock_irqsave(lock, flags);
284 if (!of_property_read_string(nc, "default-state", &state)) { 290 if (!of_property_read_string(nc, "default-state", &state)) {
285 spin_lock_irqsave(lock, flags);
286 if (!strcmp(state, "on")) { 291 if (!strcmp(state, "on")) {
287 led->cdev.brightness = LED_FULL; 292 led->cdev.brightness = LED_FULL;
288 bcm6328_led_mode(led, BCM6328_LED_MODE_ON);
289 } else if (!strcmp(state, "keep")) { 293 } else if (!strcmp(state, "keep")) {
290 void __iomem *mode; 294 void __iomem *mode;
291 unsigned long val, shift; 295 unsigned long val, shift;
@@ -296,21 +300,28 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
296 else 300 else
297 mode = mem + BCM6328_REG_MODE_LO; 301 mode = mem + BCM6328_REG_MODE_LO;
298 302
299 val = bcm6328_led_read(mode) >> (shift % 16); 303 val = bcm6328_led_read(mode) >>
304 BCM6328_LED_SHIFT(shift % 16);
300 val &= BCM6328_LED_MODE_MASK; 305 val &= BCM6328_LED_MODE_MASK;
301 if (val == BCM6328_LED_MODE_ON) 306 if ((led->active_low && val == BCM6328_LED_MODE_ON) ||
307 (!led->active_low && val == BCM6328_LED_MODE_OFF))
302 led->cdev.brightness = LED_FULL; 308 led->cdev.brightness = LED_FULL;
303 else { 309 else
304 led->cdev.brightness = LED_OFF; 310 led->cdev.brightness = LED_OFF;
305 bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
306 }
307 } else { 311 } else {
308 led->cdev.brightness = LED_OFF; 312 led->cdev.brightness = LED_OFF;
309 bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
310 } 313 }
311 spin_unlock_irqrestore(lock, flags); 314 } else {
315 led->cdev.brightness = LED_OFF;
312 } 316 }
313 317
318 if ((led->active_low && led->cdev.brightness == LED_FULL) ||
319 (!led->active_low && led->cdev.brightness == LED_OFF))
320 bcm6328_led_mode(led, BCM6328_LED_MODE_ON);
321 else
322 bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
323 spin_unlock_irqrestore(lock, flags);
324
314 led->cdev.brightness_set = bcm6328_led_set; 325 led->cdev.brightness_set = bcm6328_led_set;
315 led->cdev.blink_set = bcm6328_blink_set; 326 led->cdev.blink_set = bcm6328_blink_set;
316 327
@@ -360,9 +371,17 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
360 bcm6328_led_write(mem + BCM6328_REG_LNKACTSEL_LO, 0); 371 bcm6328_led_write(mem + BCM6328_REG_LNKACTSEL_LO, 0);
361 372
362 val = bcm6328_led_read(mem + BCM6328_REG_INIT); 373 val = bcm6328_led_read(mem + BCM6328_REG_INIT);
363 val &= ~BCM6328_SERIAL_LED_EN; 374 val &= ~(BCM6328_INIT_MASK);
364 if (of_property_read_bool(np, "brcm,serial-leds")) 375 if (of_property_read_bool(np, "brcm,serial-leds"))
365 val |= BCM6328_SERIAL_LED_EN; 376 val |= BCM6328_SERIAL_LED_EN;
377 if (of_property_read_bool(np, "brcm,serial-mux"))
378 val |= BCM6328_SERIAL_LED_MUX;
379 if (of_property_read_bool(np, "brcm,serial-clk-low"))
380 val |= BCM6328_SERIAL_LED_CLK_NPOL;
381 if (!of_property_read_bool(np, "brcm,serial-dat-low"))
382 val |= BCM6328_SERIAL_LED_DATA_PPOL;
383 if (!of_property_read_bool(np, "brcm,serial-shift-inv"))
384 val |= BCM6328_SERIAL_LED_SHIFT_DIR;
366 bcm6328_led_write(mem + BCM6328_REG_INIT, val); 385 bcm6328_led_write(mem + BCM6328_REG_INIT, val);
367 386
368 for_each_available_child_of_node(np, child) { 387 for_each_available_child_of_node(np, child) {
@@ -373,7 +392,7 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
373 continue; 392 continue;
374 393
375 if (reg >= BCM6328_LED_MAX_COUNT) { 394 if (reg >= BCM6328_LED_MAX_COUNT) {
376 dev_err(dev, "invalid LED (>= %d)\n", 395 dev_err(dev, "invalid LED (%u >= %d)\n", reg,
377 BCM6328_LED_MAX_COUNT); 396 BCM6328_LED_MAX_COUNT);
378 continue; 397 continue;
379 } 398 }
@@ -384,8 +403,10 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
384 rc = bcm6328_led(dev, child, reg, mem, lock, 403 rc = bcm6328_led(dev, child, reg, mem, lock,
385 blink_leds, blink_delay); 404 blink_leds, blink_delay);
386 405
387 if (rc < 0) 406 if (rc < 0) {
407 of_node_put(child);
388 return rc; 408 return rc;
409 }
389 } 410 }
390 411
391 return 0; 412 return 0;
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 7ea3526702e0..82b4ee1bc87e 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -215,8 +215,10 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
215 } 215 }
216 216
217 rc = bcm6358_led(dev, child, reg, mem, lock); 217 rc = bcm6358_led(dev, child, reg, mem, lock);
218 if (rc < 0) 218 if (rc < 0) {
219 of_node_put(child);
219 return rc; 220 return rc;
221 }
220 } 222 }
221 223
222 return 0; 224 return 0;
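Note: bcm6328 and bcm6358 get the same fix: for_each_available_child_of_node() holds a reference on the current child, so returning from inside the loop must drop it with of_node_put() or the node reference is leaked. A minimal sketch of the pattern (the per-child callback is hypothetical):

#include <linux/of.h>

static int example_register_one(struct device_node *child)
{
	return 0;	/* stand-in for per-LED setup */
}

static int example_register_children(struct device_node *np)
{
	struct device_node *child;
	int rc;

	for_each_available_child_of_node(np, child) {
		rc = example_register_one(child);
		if (rc < 0) {
			/* Early exit skips the iterator's own put. */
			of_node_put(child);
			return rc;
		}
	}

	return 0;
}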
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index d97522080491..9be195707b39 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -36,7 +36,6 @@ static struct led_classdev qube_front_led = {
36static int cobalt_qube_led_probe(struct platform_device *pdev) 36static int cobalt_qube_led_probe(struct platform_device *pdev)
37{ 37{
38 struct resource *res; 38 struct resource *res;
39 int retval;
40 39
41 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 40 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
42 if (!res) 41 if (!res)
@@ -49,31 +48,11 @@ static int cobalt_qube_led_probe(struct platform_device *pdev)
49 led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT; 48 led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT;
50 writeb(led_value, led_port); 49 writeb(led_value, led_port);
51 50
52 retval = led_classdev_register(&pdev->dev, &qube_front_led); 51 return devm_led_classdev_register(&pdev->dev, &qube_front_led);
53 if (retval)
54 goto err_null;
55
56 return 0;
57
58err_null:
59 led_port = NULL;
60
61 return retval;
62}
63
64static int cobalt_qube_led_remove(struct platform_device *pdev)
65{
66 led_classdev_unregister(&qube_front_led);
67
68 if (led_port)
69 led_port = NULL;
70
71 return 0;
72} 52}
73 53
74static struct platform_driver cobalt_qube_led_driver = { 54static struct platform_driver cobalt_qube_led_driver = {
75 .probe = cobalt_qube_led_probe, 55 .probe = cobalt_qube_led_probe,
76 .remove = cobalt_qube_led_remove,
77 .driver = { 56 .driver = {
78 .name = "cobalt-qube-leds", 57 .name = "cobalt-qube-leds",
79 }, 58 },
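Note: cobalt-qube is the first of many drivers in this series converted to devm_led_classdev_register(), which ties the classdev's lifetime to the struct device and lets the .remove callback and its manual unwind code disappear. A minimal sketch of the resulting probe-only driver shape (names hypothetical, brightness hook omitted):

#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct led_classdev example_led = {
	.name	= "example:front",
	/* .brightness_set = example_led_set,  driver specific */
};

static int example_led_probe(struct platform_device *pdev)
{
	/* Unregistered automatically when the device goes away. */
	return devm_led_classdev_register(&pdev->dev, &example_led);
}

static struct platform_driver example_led_driver = {
	.probe	= example_led_probe,
	.driver	= {
		.name = "example-leds",
	},
};
module_platform_driver(example_led_driver);

MODULE_LICENSE("GPL");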
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index af1876a3a77c..5db4515a4fd7 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -291,9 +291,22 @@ static int gpio_led_remove(struct platform_device *pdev)
291 return 0; 291 return 0;
292} 292}
293 293
294static void gpio_led_shutdown(struct platform_device *pdev)
295{
296 struct gpio_leds_priv *priv = platform_get_drvdata(pdev);
297 int i;
298
299 for (i = 0; i < priv->num_leds; i++) {
300 struct gpio_led_data *led = &priv->leds[i];
301
302 gpio_led_set(&led->cdev, LED_OFF);
303 }
304}
305
294static struct platform_driver gpio_led_driver = { 306static struct platform_driver gpio_led_driver = {
295 .probe = gpio_led_probe, 307 .probe = gpio_led_probe,
296 .remove = gpio_led_remove, 308 .remove = gpio_led_remove,
309 .shutdown = gpio_led_shutdown,
297 .driver = { 310 .driver = {
298 .name = "leds-gpio", 311 .name = "leds-gpio",
299 .of_match_table = of_gpio_leds_match, 312 .of_match_table = of_gpio_leds_match,
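Note: leds-gpio additionally grows a .shutdown handler so no LED stays lit across poweroff or kexec. A condensed sketch of wiring such a hook into a platform driver, with the private data layout and per-LED off helper as placeholders:

#include <linux/platform_device.h>

struct example_priv {
	int num_leds;
	/* per-LED state would live here */
};

static void example_led_off(struct example_priv *priv, int i)
{
	/* stand-in for gpio_led_set(&led->cdev, LED_OFF) */
}

static void example_leds_shutdown(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < priv->num_leds; i++)
		example_led_off(priv, i);
}

static struct platform_driver example_leds_driver = {
	/* .probe / .remove as before */
	.shutdown = example_leds_shutdown,
	.driver = {
		.name = "example-leds",
	},
};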
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index 0b84c0113126..a6b8db0e27f1 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -59,28 +59,15 @@ static int hp6xxled_probe(struct platform_device *pdev)
59{ 59{
60 int ret; 60 int ret;
61 61
62 ret = led_classdev_register(&pdev->dev, &hp6xx_red_led); 62 ret = devm_led_classdev_register(&pdev->dev, &hp6xx_red_led);
63 if (ret < 0) 63 if (ret < 0)
64 return ret; 64 return ret;
65 65
66 ret = led_classdev_register(&pdev->dev, &hp6xx_green_led); 66 return devm_led_classdev_register(&pdev->dev, &hp6xx_green_led);
67 if (ret < 0)
68 led_classdev_unregister(&hp6xx_red_led);
69
70 return ret;
71}
72
73static int hp6xxled_remove(struct platform_device *pdev)
74{
75 led_classdev_unregister(&hp6xx_red_led);
76 led_classdev_unregister(&hp6xx_green_led);
77
78 return 0;
79} 67}
80 68
81static struct platform_driver hp6xxled_driver = { 69static struct platform_driver hp6xxled_driver = {
82 .probe = hp6xxled_probe, 70 .probe = hp6xxled_probe,
83 .remove = hp6xxled_remove,
84 .driver = { 71 .driver = {
85 .name = "hp6xx-led", 72 .name = "hp6xx-led",
86 }, 73 },
diff --git a/drivers/leds/leds-ipaq-micro.c b/drivers/leds/leds-ipaq-micro.c
index 3776f516cd88..fa262b6b25eb 100644
--- a/drivers/leds/leds-ipaq-micro.c
+++ b/drivers/leds/leds-ipaq-micro.c
@@ -16,9 +16,9 @@
16#define LED_YELLOW 0x00 16#define LED_YELLOW 0x00
17#define LED_GREEN 0x01 17#define LED_GREEN 0x01
18 18
19#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */ 19#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
20#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop set 0:disable, 1:enable */ 20#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop set 0:disable, 1:enable */
21#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */ 21#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
22 22
23static void micro_leds_brightness_set(struct led_classdev *led_cdev, 23static void micro_leds_brightness_set(struct led_classdev *led_cdev,
24 enum led_brightness value) 24 enum led_brightness value)
@@ -79,14 +79,14 @@ static int micro_leds_blink_set(struct led_classdev *led_cdev,
79 }; 79 };
80 80
81 msg.tx_data[0] = LED_GREEN; 81 msg.tx_data[0] = LED_GREEN;
82 if (*delay_on > IPAQ_LED_MAX_DUTY || 82 if (*delay_on > IPAQ_LED_MAX_DUTY ||
83 *delay_off > IPAQ_LED_MAX_DUTY) 83 *delay_off > IPAQ_LED_MAX_DUTY)
84 return -EINVAL; 84 return -EINVAL;
85 85
86 if (*delay_on == 0 && *delay_off == 0) { 86 if (*delay_on == 0 && *delay_off == 0) {
87 *delay_on = 100; 87 *delay_on = 100;
88 *delay_off = 100; 88 *delay_off = 100;
89 } 89 }
90 90
91 msg.tx_data[1] = 0; 91 msg.tx_data[1] = 0;
92 if (*delay_on >= IPAQ_LED_MAX_DUTY) 92 if (*delay_on >= IPAQ_LED_MAX_DUTY)
@@ -111,7 +111,7 @@ static int micro_leds_probe(struct platform_device *pdev)
111{ 111{
112 int ret; 112 int ret;
113 113
114 ret = led_classdev_register(&pdev->dev, &micro_led); 114 ret = devm_led_classdev_register(&pdev->dev, &micro_led);
115 if (ret) { 115 if (ret) {
116 dev_err(&pdev->dev, "registering led failed: %d\n", ret); 116 dev_err(&pdev->dev, "registering led failed: %d\n", ret);
117 return ret; 117 return ret;
@@ -121,18 +121,11 @@ static int micro_leds_probe(struct platform_device *pdev)
121 return 0; 121 return 0;
122} 122}
123 123
124static int micro_leds_remove(struct platform_device *pdev)
125{
126 led_classdev_unregister(&micro_led);
127 return 0;
128}
129
130static struct platform_driver micro_leds_device_driver = { 124static struct platform_driver micro_leds_device_driver = {
131 .driver = { 125 .driver = {
132 .name = "ipaq-micro-leds", 126 .name = "ipaq-micro-leds",
133 }, 127 },
134 .probe = micro_leds_probe, 128 .probe = micro_leds_probe,
135 .remove = micro_leds_remove,
136}; 129};
137module_platform_driver(micro_leds_device_driver); 130module_platform_driver(micro_leds_device_driver);
138 131
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 80ba048889d6..24c4b53a6b93 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -59,23 +59,13 @@ static int locomoled_probe(struct locomo_dev *ldev)
59{ 59{
60 int ret; 60 int ret;
61 61
62 ret = led_classdev_register(&ldev->dev, &locomo_led0); 62 ret = devm_led_classdev_register(&ldev->dev, &locomo_led0);
63 if (ret < 0) 63 if (ret < 0)
64 return ret; 64 return ret;
65 65
66 ret = led_classdev_register(&ldev->dev, &locomo_led1); 66 return devm_led_classdev_register(&ldev->dev, &locomo_led1);
67 if (ret < 0)
68 led_classdev_unregister(&locomo_led0);
69
70 return ret;
71} 67}
72 68
73static int locomoled_remove(struct locomo_dev *dev)
74{
75 led_classdev_unregister(&locomo_led0);
76 led_classdev_unregister(&locomo_led1);
77 return 0;
78}
79 69
80static struct locomo_driver locomoled_driver = { 70static struct locomo_driver locomoled_driver = {
81 .drv = { 71 .drv = {
@@ -83,7 +73,6 @@ static struct locomo_driver locomoled_driver = {
83 }, 73 },
84 .devid = LOCOMO_DEVID_LED, 74 .devid = LOCOMO_DEVID_LED,
85 .probe = locomoled_probe, 75 .probe = locomoled_probe,
86 .remove = locomoled_remove,
87}; 76};
88 77
89static int __init locomoled_init(void) 78static int __init locomoled_init(void)
diff --git a/drivers/leds/leds-menf21bmc.c b/drivers/leds/leds-menf21bmc.c
index 4b9eea815b1a..dec2a6e59676 100644
--- a/drivers/leds/leds-menf21bmc.c
+++ b/drivers/leds/leds-menf21bmc.c
@@ -87,36 +87,20 @@ static int menf21bmc_led_probe(struct platform_device *pdev)
87 leds[i].cdev.name = leds[i].name; 87 leds[i].cdev.name = leds[i].name;
88 leds[i].cdev.brightness_set = menf21bmc_led_set; 88 leds[i].cdev.brightness_set = menf21bmc_led_set;
89 leds[i].i2c_client = i2c_client; 89 leds[i].i2c_client = i2c_client;
90 ret = led_classdev_register(&pdev->dev, &leds[i].cdev); 90 ret = devm_led_classdev_register(&pdev->dev, &leds[i].cdev);
91 if (ret < 0) 91 if (ret < 0) {
92 goto err_free_leds; 92 dev_err(&pdev->dev, "failed to register LED device\n");
93 return ret;
94 }
93 } 95 }
94 dev_info(&pdev->dev, "MEN 140F21P00 BMC LED device enabled\n"); 96 dev_info(&pdev->dev, "MEN 140F21P00 BMC LED device enabled\n");
95 97
96 return 0; 98 return 0;
97 99
98err_free_leds:
99 dev_err(&pdev->dev, "failed to register LED device\n");
100
101 for (i = i - 1; i >= 0; i--)
102 led_classdev_unregister(&leds[i].cdev);
103
104 return ret;
105}
106
107static int menf21bmc_led_remove(struct platform_device *pdev)
108{
109 int i;
110
111 for (i = 0; i < ARRAY_SIZE(leds); i++)
112 led_classdev_unregister(&leds[i].cdev);
113
114 return 0;
115} 100}
116 101
117static struct platform_driver menf21bmc_led = { 102static struct platform_driver menf21bmc_led = {
118 .probe = menf21bmc_led_probe, 103 .probe = menf21bmc_led_probe,
119 .remove = menf21bmc_led_remove,
120 .driver = { 104 .driver = {
121 .name = "menf21bmc_led", 105 .name = "menf21bmc_led",
122 }, 106 },
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index ec3a2e8adcae..0d214c2e403c 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -39,18 +39,11 @@ static struct led_classdev net48xx_error_led = {
39 39
40static int net48xx_led_probe(struct platform_device *pdev) 40static int net48xx_led_probe(struct platform_device *pdev)
41{ 41{
42 return led_classdev_register(&pdev->dev, &net48xx_error_led); 42 return devm_led_classdev_register(&pdev->dev, &net48xx_error_led);
43}
44
45static int net48xx_led_remove(struct platform_device *pdev)
46{
47 led_classdev_unregister(&net48xx_error_led);
48 return 0;
49} 43}
50 44
51static struct platform_driver net48xx_led_driver = { 45static struct platform_driver net48xx_led_driver = {
52 .probe = net48xx_led_probe, 46 .probe = net48xx_led_probe,
53 .remove = net48xx_led_remove,
54 .driver = { 47 .driver = {
55 .name = DRVNAME, 48 .name = DRVNAME,
56 }, 49 },
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 25e419752a7b..4b88b93244be 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -26,6 +26,7 @@
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_gpio.h>
29#include <linux/leds.h> 30#include <linux/leds.h>
30#include <linux/platform_data/leds-kirkwood-netxbig.h> 31#include <linux/platform_data/leds-kirkwood-netxbig.h>
31 32
@@ -70,7 +71,8 @@ static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
70 spin_unlock_irqrestore(&gpio_ext_lock, flags); 71 spin_unlock_irqrestore(&gpio_ext_lock, flags);
71} 72}
72 73
73static int gpio_ext_init(struct netxbig_gpio_ext *gpio_ext) 74static int gpio_ext_init(struct platform_device *pdev,
75 struct netxbig_gpio_ext *gpio_ext)
74{ 76{
75 int err; 77 int err;
76 int i; 78 int i;
@@ -80,46 +82,28 @@ static int gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
80 82
81 /* Configure address GPIOs. */ 83 /* Configure address GPIOs. */
82 for (i = 0; i < gpio_ext->num_addr; i++) { 84 for (i = 0; i < gpio_ext->num_addr; i++) {
83 err = gpio_request_one(gpio_ext->addr[i], GPIOF_OUT_INIT_LOW, 85 err = devm_gpio_request_one(&pdev->dev, gpio_ext->addr[i],
84 "GPIO extension addr"); 86 GPIOF_OUT_INIT_LOW,
87 "GPIO extension addr");
85 if (err) 88 if (err)
86 goto err_free_addr; 89 return err;
87 } 90 }
88 /* Configure data GPIOs. */ 91 /* Configure data GPIOs. */
89 for (i = 0; i < gpio_ext->num_data; i++) { 92 for (i = 0; i < gpio_ext->num_data; i++) {
90 err = gpio_request_one(gpio_ext->data[i], GPIOF_OUT_INIT_LOW, 93 err = devm_gpio_request_one(&pdev->dev, gpio_ext->data[i],
91 "GPIO extension data"); 94 GPIOF_OUT_INIT_LOW,
95 "GPIO extension data");
92 if (err) 96 if (err)
93 goto err_free_data; 97 return err;
94 } 98 }
95 /* Configure "enable select" GPIO. */ 99 /* Configure "enable select" GPIO. */
96 err = gpio_request_one(gpio_ext->enable, GPIOF_OUT_INIT_LOW, 100 err = devm_gpio_request_one(&pdev->dev, gpio_ext->enable,
97 "GPIO extension enable"); 101 GPIOF_OUT_INIT_LOW,
102 "GPIO extension enable");
98 if (err) 103 if (err)
99 goto err_free_data; 104 return err;
100 105
101 return 0; 106 return 0;
102
103err_free_data:
104 for (i = i - 1; i >= 0; i--)
105 gpio_free(gpio_ext->data[i]);
106 i = gpio_ext->num_addr;
107err_free_addr:
108 for (i = i - 1; i >= 0; i--)
109 gpio_free(gpio_ext->addr[i]);
110
111 return err;
112}
113
114static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
115{
116 int i;
117
118 gpio_free(gpio_ext->enable);
119 for (i = gpio_ext->num_addr - 1; i >= 0; i--)
120 gpio_free(gpio_ext->addr[i]);
121 for (i = gpio_ext->num_data - 1; i >= 0; i--)
122 gpio_free(gpio_ext->data[i]);
123} 107}
124 108
125/* 109/*
@@ -132,7 +116,6 @@ struct netxbig_led_data {
132 int mode_addr; 116 int mode_addr;
133 int *mode_val; 117 int *mode_val;
134 int bright_addr; 118 int bright_addr;
135 int bright_max;
136 struct netxbig_led_timer *timer; 119 struct netxbig_led_timer *timer;
137 int num_timer; 120 int num_timer;
138 enum netxbig_led_mode mode; 121 enum netxbig_led_mode mode;
@@ -194,7 +177,7 @@ static void netxbig_led_set(struct led_classdev *led_cdev,
194 struct netxbig_led_data *led_dat = 177 struct netxbig_led_data *led_dat =
195 container_of(led_cdev, struct netxbig_led_data, cdev); 178 container_of(led_cdev, struct netxbig_led_data, cdev);
196 enum netxbig_led_mode mode; 179 enum netxbig_led_mode mode;
197 int mode_val, bright_val; 180 int mode_val;
198 int set_brightness = 1; 181 int set_brightness = 1;
199 unsigned long flags; 182 unsigned long flags;
200 183
@@ -220,12 +203,9 @@ static void netxbig_led_set(struct led_classdev *led_cdev,
220 * SATA LEDs. So, change the brightness setting for a single 203 * SATA LEDs. So, change the brightness setting for a single
221 * SATA LED will affect all the others. 204 * SATA LED will affect all the others.
222 */ 205 */
223 if (set_brightness) { 206 if (set_brightness)
224 bright_val = DIV_ROUND_UP(value * led_dat->bright_max,
225 LED_FULL);
226 gpio_ext_set_value(led_dat->gpio_ext, 207 gpio_ext_set_value(led_dat->gpio_ext,
227 led_dat->bright_addr, bright_val); 208 led_dat->bright_addr, value);
228 }
229 209
230 spin_unlock_irqrestore(&led_dat->lock, flags); 210 spin_unlock_irqrestore(&led_dat->lock, flags);
231} 211}
@@ -299,18 +279,11 @@ static struct attribute *netxbig_led_attrs[] = {
299}; 279};
300ATTRIBUTE_GROUPS(netxbig_led); 280ATTRIBUTE_GROUPS(netxbig_led);
301 281
302static void delete_netxbig_led(struct netxbig_led_data *led_dat) 282static int create_netxbig_led(struct platform_device *pdev,
283 struct netxbig_led_platform_data *pdata,
284 struct netxbig_led_data *led_dat,
285 const struct netxbig_led *template)
303{ 286{
304 led_classdev_unregister(&led_dat->cdev);
305}
306
307static int
308create_netxbig_led(struct platform_device *pdev,
309 struct netxbig_led_data *led_dat,
310 const struct netxbig_led *template)
311{
312 struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
313
314 spin_lock_init(&led_dat->lock); 287 spin_lock_init(&led_dat->lock);
315 led_dat->gpio_ext = pdata->gpio_ext; 288 led_dat->gpio_ext = pdata->gpio_ext;
316 led_dat->cdev.name = template->name; 289 led_dat->cdev.name = template->name;
@@ -329,11 +302,11 @@ create_netxbig_led(struct platform_device *pdev,
329 */ 302 */
330 led_dat->sata = 0; 303 led_dat->sata = 0;
331 led_dat->cdev.brightness = LED_OFF; 304 led_dat->cdev.brightness = LED_OFF;
305 led_dat->cdev.max_brightness = template->bright_max;
332 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 306 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
333 led_dat->mode_addr = template->mode_addr; 307 led_dat->mode_addr = template->mode_addr;
334 led_dat->mode_val = template->mode_val; 308 led_dat->mode_val = template->mode_val;
335 led_dat->bright_addr = template->bright_addr; 309 led_dat->bright_addr = template->bright_addr;
336 led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
337 led_dat->timer = pdata->timer; 310 led_dat->timer = pdata->timer;
338 led_dat->num_timer = pdata->num_timer; 311 led_dat->num_timer = pdata->num_timer;
339 /* 312 /*
@@ -343,67 +316,274 @@ create_netxbig_led(struct platform_device *pdev,
343 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 316 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
344 led_dat->cdev.groups = netxbig_led_groups; 317 led_dat->cdev.groups = netxbig_led_groups;
345 318
346 return led_classdev_register(&pdev->dev, &led_dat->cdev); 319 return devm_led_classdev_register(&pdev->dev, &led_dat->cdev);
347} 320}
348 321
349static int netxbig_led_probe(struct platform_device *pdev) 322#ifdef CONFIG_OF_GPIO
323static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
324 struct netxbig_gpio_ext *gpio_ext)
350{ 325{
351 struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev); 326 int *addr, *data;
352 struct netxbig_led_data *leds_data; 327 int num_addr, num_data;
353 int i;
354 int ret; 328 int ret;
329 int i;
355 330
356 if (!pdata) 331 ret = of_gpio_named_count(np, "addr-gpios");
357 return -EINVAL; 332 if (ret < 0) {
358 333 dev_err(dev,
359 leds_data = devm_kzalloc(&pdev->dev, 334 "Failed to count GPIOs in DT property addr-gpios\n");
360 sizeof(struct netxbig_led_data) * pdata->num_leds, GFP_KERNEL); 335 return ret;
361 if (!leds_data) 336 }
337 num_addr = ret;
338 addr = devm_kzalloc(dev, num_addr * sizeof(*addr), GFP_KERNEL);
339 if (!addr)
362 return -ENOMEM; 340 return -ENOMEM;
363 341
364 ret = gpio_ext_init(pdata->gpio_ext); 342 for (i = 0; i < num_addr; i++) {
365 if (ret < 0) 343 ret = of_get_named_gpio(np, "addr-gpios", i);
344 if (ret < 0)
345 return ret;
346 addr[i] = ret;
347 }
348 gpio_ext->addr = addr;
349 gpio_ext->num_addr = num_addr;
350
351 ret = of_gpio_named_count(np, "data-gpios");
352 if (ret < 0) {
353 dev_err(dev,
354 "Failed to count GPIOs in DT property data-gpios\n");
366 return ret; 355 return ret;
356 }
357 num_data = ret;
358 data = devm_kzalloc(dev, num_data * sizeof(*data), GFP_KERNEL);
359 if (!data)
360 return -ENOMEM;
367 361
368 for (i = 0; i < pdata->num_leds; i++) { 362 for (i = 0; i < num_data; i++) {
369 ret = create_netxbig_led(pdev, &leds_data[i], &pdata->leds[i]); 363 ret = of_get_named_gpio(np, "data-gpios", i);
370 if (ret < 0) 364 if (ret < 0)
371 goto err_free_leds; 365 return ret;
366 data[i] = ret;
372 } 367 }
368 gpio_ext->data = data;
369 gpio_ext->num_data = num_data;
373 370
374 platform_set_drvdata(pdev, leds_data); 371 ret = of_get_named_gpio(np, "enable-gpio", 0);
372 if (ret < 0) {
373 dev_err(dev,
374 "Failed to get GPIO from DT property enable-gpio\n");
375 return ret;
376 }
377 gpio_ext->enable = ret;
375 378
376 return 0; 379 return 0;
380}
381
382static int netxbig_leds_get_of_pdata(struct device *dev,
383 struct netxbig_led_platform_data *pdata)
384{
385 struct device_node *np = dev->of_node;
386 struct device_node *gpio_ext_np;
387 struct device_node *child;
388 struct netxbig_gpio_ext *gpio_ext;
389 struct netxbig_led_timer *timers;
390 struct netxbig_led *leds, *led;
391 int num_timers;
392 int num_leds = 0;
393 int ret;
394 int i;
377 395
378err_free_leds: 396 /* GPIO extension */
379 for (i = i - 1; i >= 0; i--) 397 gpio_ext_np = of_parse_phandle(np, "gpio-ext", 0);
380 delete_netxbig_led(&leds_data[i]); 398 if (!gpio_ext_np) {
399 dev_err(dev, "Failed to get DT handle gpio-ext\n");
400 return -EINVAL;
401 }
381 402
382 gpio_ext_free(pdata->gpio_ext); 403 gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL);
404 if (!gpio_ext)
405 return -ENOMEM;
406 ret = gpio_ext_get_of_pdata(dev, gpio_ext_np, gpio_ext);
407 if (ret)
408 return ret;
409 of_node_put(gpio_ext_np);
410 pdata->gpio_ext = gpio_ext;
411
412 /* Timers (optional) */
413 ret = of_property_count_u32_elems(np, "timers");
414 if (ret > 0) {
415 if (ret % 3)
416 return -EINVAL;
417 num_timers = ret / 3;
418 timers = devm_kzalloc(dev, num_timers * sizeof(*timers),
419 GFP_KERNEL);
420 if (!timers)
421 return -ENOMEM;
422 for (i = 0; i < num_timers; i++) {
423 u32 tmp;
424
425 of_property_read_u32_index(np, "timers", 3 * i,
426 &timers[i].mode);
427 if (timers[i].mode >= NETXBIG_LED_MODE_NUM)
428 return -EINVAL;
429 of_property_read_u32_index(np, "timers",
430 3 * i + 1, &tmp);
431 timers[i].delay_on = tmp;
432 of_property_read_u32_index(np, "timers",
433 3 * i + 2, &tmp);
434 timers[i].delay_off = tmp;
435 }
436 pdata->timer = timers;
437 pdata->num_timer = num_timers;
438 }
439
440 /* LEDs */
441 num_leds = of_get_child_count(np);
442 if (!num_leds) {
443 dev_err(dev, "No LED subnodes found in DT\n");
444 return -ENODEV;
445 }
446
447 leds = devm_kzalloc(dev, num_leds * sizeof(*leds), GFP_KERNEL);
448 if (!leds)
449 return -ENOMEM;
450
451 led = leds;
452 for_each_child_of_node(np, child) {
453 const char *string;
454 int *mode_val;
455 int num_modes;
456
457 ret = of_property_read_u32(child, "mode-addr",
458 &led->mode_addr);
459 if (ret)
460 goto err_node_put;
461
462 ret = of_property_read_u32(child, "bright-addr",
463 &led->bright_addr);
464 if (ret)
465 goto err_node_put;
466
467 ret = of_property_read_u32(child, "max-brightness",
468 &led->bright_max);
469 if (ret)
470 goto err_node_put;
471
472 mode_val =
473 devm_kzalloc(dev,
474 NETXBIG_LED_MODE_NUM * sizeof(*mode_val),
475 GFP_KERNEL);
476 if (!mode_val) {
477 ret = -ENOMEM;
478 goto err_node_put;
479 }
480
481 for (i = 0; i < NETXBIG_LED_MODE_NUM; i++)
482 mode_val[i] = NETXBIG_LED_INVALID_MODE;
483
484 ret = of_property_count_u32_elems(child, "mode-val");
485 if (ret < 0 || ret % 2) {
486 ret = -EINVAL;
487 goto err_node_put;
488 }
489 num_modes = ret / 2;
490 if (num_modes > NETXBIG_LED_MODE_NUM) {
491 ret = -EINVAL;
492 goto err_node_put;
493 }
494
495 for (i = 0; i < num_modes; i++) {
496 int mode;
497 int val;
498
499 of_property_read_u32_index(child,
500 "mode-val", 2 * i, &mode);
501 of_property_read_u32_index(child,
502 "mode-val", 2 * i + 1, &val);
503 if (mode >= NETXBIG_LED_MODE_NUM) {
504 ret = -EINVAL;
505 goto err_node_put;
506 }
507 mode_val[mode] = val;
508 }
509 led->mode_val = mode_val;
510
511 if (!of_property_read_string(child, "label", &string))
512 led->name = string;
513 else
514 led->name = child->name;
515
516 if (!of_property_read_string(child,
517 "linux,default-trigger", &string))
518 led->default_trigger = string;
519
520 led++;
521 }
522
523 pdata->leds = leds;
524 pdata->num_leds = num_leds;
525
526 return 0;
527
528err_node_put:
529 of_node_put(child);
383 return ret; 530 return ret;
384} 531}
385 532
386static int netxbig_led_remove(struct platform_device *pdev) 533static const struct of_device_id of_netxbig_leds_match[] = {
534 { .compatible = "lacie,netxbig-leds", },
535 {},
536};
537#else
538static inline int
539netxbig_leds_get_of_pdata(struct device *dev,
540 struct netxbig_led_platform_data *pdata)
541{
542 return -ENODEV;
543}
544#endif /* CONFIG_OF_GPIO */
545
546static int netxbig_led_probe(struct platform_device *pdev)
387{ 547{
388 struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev); 548 struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
389 struct netxbig_led_data *leds_data; 549 struct netxbig_led_data *leds_data;
390 int i; 550 int i;
551 int ret;
391 552
392 leds_data = platform_get_drvdata(pdev); 553 if (!pdata) {
554 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
555 if (!pdata)
556 return -ENOMEM;
557 ret = netxbig_leds_get_of_pdata(&pdev->dev, pdata);
558 if (ret)
559 return ret;
560 }
561
562 leds_data = devm_kzalloc(&pdev->dev,
563 pdata->num_leds * sizeof(*leds_data),
564 GFP_KERNEL);
565 if (!leds_data)
566 return -ENOMEM;
393 567
394 for (i = 0; i < pdata->num_leds; i++) 568 ret = gpio_ext_init(pdev, pdata->gpio_ext);
395 delete_netxbig_led(&leds_data[i]); 569 if (ret < 0)
570 return ret;
396 571
397 gpio_ext_free(pdata->gpio_ext); 572 for (i = 0; i < pdata->num_leds; i++) {
573 ret = create_netxbig_led(pdev, pdata,
574 &leds_data[i], &pdata->leds[i]);
575 if (ret < 0)
576 return ret;
577 }
398 578
399 return 0; 579 return 0;
400} 580}
401 581
402static struct platform_driver netxbig_led_driver = { 582static struct platform_driver netxbig_led_driver = {
403 .probe = netxbig_led_probe, 583 .probe = netxbig_led_probe,
404 .remove = netxbig_led_remove,
405 .driver = { 584 .driver = {
406 .name = "leds-netxbig", 585 .name = "leds-netxbig",
586 .of_match_table = of_match_ptr(of_netxbig_leds_match),
407 }, 587 },
408}; 588};
409 589
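Note: the netxbig rework reads its platform data from the device tree, and its recurring building block is counting a named GPIO property and fetching each entry. A minimal sketch of that helper pattern, with the property name taken from the new binding and everything else hypothetical:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>

static int example_get_gpios(struct device *dev, struct device_node *np,
			     const char *prop, int **gpios_out)
{
	int count, i, ret, *gpios;

	count = of_gpio_named_count(np, prop);
	if (count < 0)
		return count;

	gpios = devm_kcalloc(dev, count, sizeof(*gpios), GFP_KERNEL);
	if (!gpios)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = of_get_named_gpio(np, prop, i);
		if (ret < 0)
			return ret;
		gpios[i] = ret;
	}

	*gpios_out = gpios;
	return count;
}

Called as example_get_gpios(dev, node, "addr-gpios", &addr) and again for "data-gpios", this mirrors what gpio_ext_get_of_pdata() does above.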
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
index 39870de20a26..12af1127d9b7 100644
--- a/drivers/leds/leds-ot200.c
+++ b/drivers/leds/leds-ot200.c
@@ -124,9 +124,9 @@ static int ot200_led_probe(struct platform_device *pdev)
124 leds[i].cdev.name = leds[i].name; 124 leds[i].cdev.name = leds[i].name;
125 leds[i].cdev.brightness_set = ot200_led_brightness_set; 125 leds[i].cdev.brightness_set = ot200_led_brightness_set;
126 126
127 ret = led_classdev_register(&pdev->dev, &leds[i].cdev); 127 ret = devm_led_classdev_register(&pdev->dev, &leds[i].cdev);
128 if (ret < 0) 128 if (ret < 0)
129 goto err; 129 return ret;
130 } 130 }
131 131
132 leds_front = 0; /* turn off all front leds */ 132 leds_front = 0; /* turn off all front leds */
@@ -135,27 +135,10 @@ static int ot200_led_probe(struct platform_device *pdev)
135 outb(leds_back, 0x5a); 135 outb(leds_back, 0x5a);
136 136
137 return 0; 137 return 0;
138
139err:
140 for (i = i - 1; i >= 0; i--)
141 led_classdev_unregister(&leds[i].cdev);
142
143 return ret;
144}
145
146static int ot200_led_remove(struct platform_device *pdev)
147{
148 int i;
149
150 for (i = 0; i < ARRAY_SIZE(leds); i++)
151 led_classdev_unregister(&leds[i].cdev);
152
153 return 0;
154} 138}
155 139
156static struct platform_driver ot200_led_driver = { 140static struct platform_driver ot200_led_driver = {
157 .probe = ot200_led_probe, 141 .probe = ot200_led_probe,
158 .remove = ot200_led_remove,
159 .driver = { 142 .driver = {
160 .name = "leds-ot200", 143 .name = "leds-ot200",
161 }, 144 },
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
index 2c5c5b12ab64..1e75e1fe9b72 100644
--- a/drivers/leds/leds-powernv.c
+++ b/drivers/leds/leds-powernv.c
@@ -262,15 +262,19 @@ static int powernv_led_classdev(struct platform_device *pdev,
262 while ((cur = of_prop_next_string(p, cur)) != NULL) { 262 while ((cur = of_prop_next_string(p, cur)) != NULL) {
263 powernv_led = devm_kzalloc(dev, sizeof(*powernv_led), 263 powernv_led = devm_kzalloc(dev, sizeof(*powernv_led),
264 GFP_KERNEL); 264 GFP_KERNEL);
265 if (!powernv_led) 265 if (!powernv_led) {
266 of_node_put(np);
266 return -ENOMEM; 267 return -ENOMEM;
268 }
267 269
268 powernv_led->common = powernv_led_common; 270 powernv_led->common = powernv_led_common;
269 powernv_led->loc_code = (char *)np->name; 271 powernv_led->loc_code = (char *)np->name;
270 272
271 rc = powernv_led_create(dev, powernv_led, cur); 273 rc = powernv_led_create(dev, powernv_led, cur);
272 if (rc) 274 if (rc) {
275 of_node_put(np);
273 return rc; 276 return rc;
277 }
274 } /* while end */ 278 } /* while end */
275 } 279 }
276 280
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/drivers/leds/leds-sead3.c
index c938ceeb8848..eb97a3271bb3 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/drivers/leds/leds-sead3.c
@@ -59,6 +59,7 @@ static int sead3_led_remove(struct platform_device *pdev)
59{ 59{
60 led_classdev_unregister(&sead3_pled); 60 led_classdev_unregister(&sead3_pled);
61 led_classdev_unregister(&sead3_fled); 61 led_classdev_unregister(&sead3_fled);
62
62 return 0; 63 return 0;
63} 64}
64 65
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 1ba3defdd460..473fb6b97ed4 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -76,39 +76,19 @@ static int wrap_led_probe(struct platform_device *pdev)
76{ 76{
77 int ret; 77 int ret;
78 78
79 ret = led_classdev_register(&pdev->dev, &wrap_power_led); 79 ret = devm_led_classdev_register(&pdev->dev, &wrap_power_led);
80 if (ret < 0) 80 if (ret < 0)
81 return ret; 81 return ret;
82 82
83 ret = led_classdev_register(&pdev->dev, &wrap_error_led); 83 ret = devm_led_classdev_register(&pdev->dev, &wrap_error_led);
84 if (ret < 0) 84 if (ret < 0)
85 goto err1; 85 return ret;
86
87 ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
88 if (ret < 0)
89 goto err2;
90
91 return ret;
92
93err2:
94 led_classdev_unregister(&wrap_error_led);
95err1:
96 led_classdev_unregister(&wrap_power_led);
97
98 return ret;
99}
100 86
101static int wrap_led_remove(struct platform_device *pdev) 87 return devm_led_classdev_register(&pdev->dev, &wrap_extra_led);
102{
103 led_classdev_unregister(&wrap_power_led);
104 led_classdev_unregister(&wrap_error_led);
105 led_classdev_unregister(&wrap_extra_led);
106 return 0;
107} 88}
108 89
109static struct platform_driver wrap_led_driver = { 90static struct platform_driver wrap_led_driver = {
110 .probe = wrap_led_probe, 91 .probe = wrap_led_probe,
111 .remove = wrap_led_remove,
112 .driver = { 92 .driver = {
113 .name = DRVNAME, 93 .name = DRVNAME,
114 }, 94 },
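
Both conversions above (leds-ot200 and leds-wrap) switch to devm_led_classdev_register(), so the manual error unwinding and the .remove callback can go away: the LED class devices are unregistered automatically when the platform device is unbound. A probe-only sketch of the resulting driver shape, using placeholder driver and LED names:

#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static void example_led_set(struct led_classdev *cdev,
                            enum led_brightness value)
{
        /* a real driver would program the hardware here */
}

static struct led_classdev example_led = {
        .name           = "example::status",
        .brightness_set = example_led_set,
};

static int example_led_probe(struct platform_device *pdev)
{
        /* unregistered automatically on unbind: no unwind, no remove() */
        return devm_led_classdev_register(&pdev->dev, &example_led);
}

static struct platform_driver example_led_driver = {
        .probe  = example_led_probe,
        .driver = {
                .name = "leds-example",
        },
};
module_platform_driver(example_led_driver);

MODULE_LICENSE("GPL");
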
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index bc89d7ace2c4..4238fbc31d35 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -44,6 +44,7 @@ static inline int led_get_brightness(struct led_classdev *led_cdev)
44 return led_cdev->brightness; 44 return led_cdev->brightness;
45} 45}
46 46
47void led_init_core(struct led_classdev *led_cdev);
47void led_stop_software_blink(struct led_classdev *led_cdev); 48void led_stop_software_blink(struct led_classdev *led_cdev);
48 49
49extern struct rw_semaphore leds_list_lock; 50extern struct rw_semaphore leds_list_lock;
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index fea6871d2609..8622ce651ae2 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -27,6 +27,7 @@ struct heartbeat_trig_data {
27 unsigned int phase; 27 unsigned int phase;
28 unsigned int period; 28 unsigned int period;
29 struct timer_list timer; 29 struct timer_list timer;
30 unsigned int invert;
30}; 31};
31 32
32static void led_heartbeat_function(unsigned long data) 33static void led_heartbeat_function(unsigned long data)
@@ -56,21 +57,27 @@ static void led_heartbeat_function(unsigned long data)
56 msecs_to_jiffies(heartbeat_data->period); 57 msecs_to_jiffies(heartbeat_data->period);
57 delay = msecs_to_jiffies(70); 58 delay = msecs_to_jiffies(70);
58 heartbeat_data->phase++; 59 heartbeat_data->phase++;
59 brightness = led_cdev->max_brightness; 60 if (!heartbeat_data->invert)
61 brightness = led_cdev->max_brightness;
60 break; 62 break;
61 case 1: 63 case 1:
62 delay = heartbeat_data->period / 4 - msecs_to_jiffies(70); 64 delay = heartbeat_data->period / 4 - msecs_to_jiffies(70);
63 heartbeat_data->phase++; 65 heartbeat_data->phase++;
66 if (heartbeat_data->invert)
67 brightness = led_cdev->max_brightness;
64 break; 68 break;
65 case 2: 69 case 2:
66 delay = msecs_to_jiffies(70); 70 delay = msecs_to_jiffies(70);
67 heartbeat_data->phase++; 71 heartbeat_data->phase++;
68 brightness = led_cdev->max_brightness; 72 if (!heartbeat_data->invert)
73 brightness = led_cdev->max_brightness;
69 break; 74 break;
70 default: 75 default:
71 delay = heartbeat_data->period - heartbeat_data->period / 4 - 76 delay = heartbeat_data->period - heartbeat_data->period / 4 -
72 msecs_to_jiffies(70); 77 msecs_to_jiffies(70);
73 heartbeat_data->phase = 0; 78 heartbeat_data->phase = 0;
79 if (heartbeat_data->invert)
80 brightness = led_cdev->max_brightness;
74 break; 81 break;
75 } 82 }
76 83
@@ -78,15 +85,50 @@ static void led_heartbeat_function(unsigned long data)
78 mod_timer(&heartbeat_data->timer, jiffies + delay); 85 mod_timer(&heartbeat_data->timer, jiffies + delay);
79} 86}
80 87
88static ssize_t led_invert_show(struct device *dev,
89 struct device_attribute *attr, char *buf)
90{
91 struct led_classdev *led_cdev = dev_get_drvdata(dev);
92 struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
93
94 return sprintf(buf, "%u\n", heartbeat_data->invert);
95}
96
97static ssize_t led_invert_store(struct device *dev,
98 struct device_attribute *attr, const char *buf, size_t size)
99{
100 struct led_classdev *led_cdev = dev_get_drvdata(dev);
101 struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
102 unsigned long state;
103 int ret;
104
105 ret = kstrtoul(buf, 0, &state);
106 if (ret)
107 return ret;
108
109 heartbeat_data->invert = !!state;
110
111 return size;
112}
113
114static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
115
81static void heartbeat_trig_activate(struct led_classdev *led_cdev) 116static void heartbeat_trig_activate(struct led_classdev *led_cdev)
82{ 117{
83 struct heartbeat_trig_data *heartbeat_data; 118 struct heartbeat_trig_data *heartbeat_data;
119 int rc;
84 120
85 heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL); 121 heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL);
86 if (!heartbeat_data) 122 if (!heartbeat_data)
87 return; 123 return;
88 124
89 led_cdev->trigger_data = heartbeat_data; 125 led_cdev->trigger_data = heartbeat_data;
126 rc = device_create_file(led_cdev->dev, &dev_attr_invert);
127 if (rc) {
128 kfree(led_cdev->trigger_data);
129 return;
130 }
131
90 setup_timer(&heartbeat_data->timer, 132 setup_timer(&heartbeat_data->timer,
91 led_heartbeat_function, (unsigned long) led_cdev); 133 led_heartbeat_function, (unsigned long) led_cdev);
92 heartbeat_data->phase = 0; 134 heartbeat_data->phase = 0;
@@ -100,6 +142,7 @@ static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
100 142
101 if (led_cdev->activated) { 143 if (led_cdev->activated) {
102 del_timer_sync(&heartbeat_data->timer); 144 del_timer_sync(&heartbeat_data->timer);
145 device_remove_file(led_cdev->dev, &dev_attr_invert);
103 kfree(heartbeat_data); 146 kfree(heartbeat_data);
104 led_cdev->activated = false; 147 led_cdev->activated = false;
105 } 148 }
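
The new "invert" attribute of the heartbeat trigger follows the stock sysfs show/store pattern: print the current value in show(), parse with kstrtoul() and normalise with !! in store(), create the file in activate() and remove it in deactivate(). The attribute pair in isolation, with a placeholder private structure (DEVICE_ATTR_RW() is the shorthand for the 0644 DEVICE_ATTR() used above):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct example_data {
        unsigned int invert;
};

static ssize_t invert_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct example_data *data = dev_get_drvdata(dev);

        return sprintf(buf, "%u\n", data->invert);
}

static ssize_t invert_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t size)
{
        struct example_data *data = dev_get_drvdata(dev);
        unsigned long state;
        int ret;

        ret = kstrtoul(buf, 0, &state);
        if (ret)
                return ret;

        data->invert = !!state;         /* any non-zero input means "inverted" */

        return size;
}
static DEVICE_ATTR_RW(invert);
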
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 20cc36b01b77..0a17d1b91a81 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -634,10 +634,10 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
634 634
635 disk_super = dm_block_data(sblock); 635 disk_super = dm_block_data(sblock);
636 636
637 disk_super->flags = cpu_to_le32(cmd->flags);
637 if (mutator) 638 if (mutator)
638 update_flags(disk_super, mutator); 639 update_flags(disk_super, mutator);
639 640
640 disk_super->flags = cpu_to_le32(cmd->flags);
641 disk_super->mapping_root = cpu_to_le64(cmd->root); 641 disk_super->mapping_root = cpu_to_le64(cmd->root);
642 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 642 disk_super->hint_root = cpu_to_le64(cmd->hint_root);
643 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 643 disk_super->discard_root = cpu_to_le64(cmd->discard_root);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c702de18207a..3fe3d04a968a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8040,8 +8040,7 @@ static int remove_and_add_spares(struct mddev *mddev,
8040 !test_bit(Bitmap_sync, &rdev->flags))) 8040 !test_bit(Bitmap_sync, &rdev->flags)))
8041 continue; 8041 continue;
8042 8042
8043 if (rdev->saved_raid_disk < 0) 8043 rdev->recovery_offset = 0;
8044 rdev->recovery_offset = 0;
8045 if (mddev->pers-> 8044 if (mddev->pers->
8046 hot_add_disk(mddev, rdev) == 0) { 8045 hot_add_disk(mddev, rdev) == 0) {
8047 if (sysfs_link_rdev(mddev, rdev)) 8046 if (sysfs_link_rdev(mddev, rdev))
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 421a36c593e3..2e4c4cb79e4d 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
301{ 301{
302 int s; 302 int s;
303 uint32_t max_entries = le32_to_cpu(left->header.max_entries); 303 uint32_t max_entries = le32_to_cpu(left->header.max_entries);
304 unsigned target = (nr_left + nr_center + nr_right) / 3; 304 unsigned total = nr_left + nr_center + nr_right;
305 BUG_ON(target > max_entries); 305 unsigned target_right = total / 3;
306 unsigned remainder = (target_right * 3) != total;
307 unsigned target_left = target_right + remainder;
308
309 BUG_ON(target_left > max_entries);
310 BUG_ON(target_right > max_entries);
306 311
307 if (nr_left < nr_right) { 312 if (nr_left < nr_right) {
308 s = nr_left - target; 313 s = nr_left - target_left;
309 314
310 if (s < 0 && nr_center < -s) { 315 if (s < 0 && nr_center < -s) {
311 /* not enough in central node */ 316 /* not enough in central node */
@@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
316 } else 321 } else
317 shift(left, center, s); 322 shift(left, center, s);
318 323
319 shift(center, right, target - nr_right); 324 shift(center, right, target_right - nr_right);
320 325
321 } else { 326 } else {
322 s = target - nr_right; 327 s = target_right - nr_right;
323 if (s > 0 && nr_center < s) { 328 if (s > 0 && nr_center < s) {
324 /* not enough in central node */ 329 /* not enough in central node */
325 shift(center, right, nr_center); 330 shift(center, right, nr_center);
@@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
329 } else 334 } else
330 shift(center, right, s); 335 shift(center, right, s);
331 336
332 shift(left, center, nr_left - target); 337 shift(left, center, nr_left - target_left);
333 } 338 }
334 339
335 *key_ptr(parent, c->index) = center->keys[0]; 340 *key_ptr(parent, c->index) = center->keys[0];
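
The redistribute3() change replaces the single rounded-down target with an explicit left/right split: the right node gets total/3, the left node gets one extra entry whenever total is not divisible by 3, and the centre node absorbs whatever remains. A standalone arithmetic check of that split (plain userspace C, totals chosen arbitrarily):

#include <stdio.h>

int main(void)
{
        unsigned int totals[] = { 9, 10, 11, 100 };
        unsigned int i;

        for (i = 0; i < sizeof(totals) / sizeof(totals[0]); i++) {
                unsigned int total = totals[i];
                unsigned int target_right = total / 3;
                unsigned int remainder = (target_right * 3) != total;
                unsigned int target_left = target_right + remainder;
                unsigned int target_center = total - target_left - target_right;

                printf("total=%3u -> left=%u center=%u right=%u\n",
                       total, target_left, target_center, target_right);
        }

        return 0;
}
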
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index b6cec258cc21..0e09aef43998 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -523,7 +523,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
523 523
524 r = new_block(s->info, &right); 524 r = new_block(s->info, &right);
525 if (r < 0) { 525 if (r < 0) {
526 /* FIXME: put left */ 526 unlock_block(s->info, left);
527 return r; 527 return r;
528 } 528 }
529 529
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ddd8a5f572aa..d9d031ede4bf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2195,7 +2195,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
2195 bio_trim(wbio, sector - r1_bio->sector, sectors); 2195 bio_trim(wbio, sector - r1_bio->sector, sectors);
2196 wbio->bi_iter.bi_sector += rdev->data_offset; 2196 wbio->bi_iter.bi_sector += rdev->data_offset;
2197 wbio->bi_bdev = rdev->bdev; 2197 wbio->bi_bdev = rdev->bdev;
2198 if (submit_bio_wait(WRITE, wbio) == 0) 2198 if (submit_bio_wait(WRITE, wbio) < 0)
2199 /* failure! */ 2199 /* failure! */
2200 ok = rdev_set_badblocks(rdev, sector, 2200 ok = rdev_set_badblocks(rdev, sector,
2201 sectors, 0) 2201 sectors, 0)
@@ -2258,15 +2258,16 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2258 rdev_dec_pending(conf->mirrors[m].rdev, 2258 rdev_dec_pending(conf->mirrors[m].rdev,
2259 conf->mddev); 2259 conf->mddev);
2260 } 2260 }
2261 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2262 close_write(r1_bio);
2263 if (fail) { 2261 if (fail) {
2264 spin_lock_irq(&conf->device_lock); 2262 spin_lock_irq(&conf->device_lock);
2265 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); 2263 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2266 spin_unlock_irq(&conf->device_lock); 2264 spin_unlock_irq(&conf->device_lock);
2267 md_wakeup_thread(conf->mddev->thread); 2265 md_wakeup_thread(conf->mddev->thread);
2268 } else 2266 } else {
2267 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2268 close_write(r1_bio);
2269 raid_end_bio_io(r1_bio); 2269 raid_end_bio_io(r1_bio);
2270 }
2270} 2271}
2271 2272
2272static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) 2273static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2385,6 +2386,10 @@ static void raid1d(struct md_thread *thread)
2385 r1_bio = list_first_entry(&tmp, struct r1bio, 2386 r1_bio = list_first_entry(&tmp, struct r1bio,
2386 retry_list); 2387 retry_list);
2387 list_del(&r1_bio->retry_list); 2388 list_del(&r1_bio->retry_list);
2389 if (mddev->degraded)
2390 set_bit(R1BIO_Degraded, &r1_bio->state);
2391 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2392 close_write(r1_bio);
2388 raid_end_bio_io(r1_bio); 2393 raid_end_bio_io(r1_bio);
2389 } 2394 }
2390 } 2395 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9f69dc526f8c..96f365968306 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -39,6 +39,7 @@
39 * far_copies (stored in second byte of layout) 39 * far_copies (stored in second byte of layout)
40 * far_offset (stored in bit 16 of layout ) 40 * far_offset (stored in bit 16 of layout )
41 * use_far_sets (stored in bit 17 of layout ) 41 * use_far_sets (stored in bit 17 of layout )
42 * use_far_sets_bugfixed (stored in bit 18 of layout )
42 * 43 *
43 * The data to be stored is divided into chunks using chunksize. Each device 44 * The data to be stored is divided into chunks using chunksize. Each device
44 * is divided into far_copies sections. In each section, chunks are laid out 45 * is divided into far_copies sections. In each section, chunks are laid out
@@ -1497,6 +1498,8 @@ static void status(struct seq_file *seq, struct mddev *mddev)
1497 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); 1498 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1498 else 1499 else
1499 seq_printf(seq, " %d far-copies", conf->geo.far_copies); 1500 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1501 if (conf->geo.far_set_size != conf->geo.raid_disks)
1502 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1500 } 1503 }
1501 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, 1504 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1502 conf->geo.raid_disks - mddev->degraded); 1505 conf->geo.raid_disks - mddev->degraded);
@@ -2467,7 +2470,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
2467 choose_data_offset(r10_bio, rdev) + 2470 choose_data_offset(r10_bio, rdev) +
2468 (sector - r10_bio->sector)); 2471 (sector - r10_bio->sector));
2469 wbio->bi_bdev = rdev->bdev; 2472 wbio->bi_bdev = rdev->bdev;
2470 if (submit_bio_wait(WRITE, wbio) == 0) 2473 if (submit_bio_wait(WRITE, wbio) < 0)
2471 /* Failure! */ 2474 /* Failure! */
2472 ok = rdev_set_badblocks(rdev, sector, 2475 ok = rdev_set_badblocks(rdev, sector,
2473 sectors, 0) 2476 sectors, 0)
@@ -2654,16 +2657,17 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2654 rdev_dec_pending(rdev, conf->mddev); 2657 rdev_dec_pending(rdev, conf->mddev);
2655 } 2658 }
2656 } 2659 }
2657 if (test_bit(R10BIO_WriteError,
2658 &r10_bio->state))
2659 close_write(r10_bio);
2660 if (fail) { 2660 if (fail) {
2661 spin_lock_irq(&conf->device_lock); 2661 spin_lock_irq(&conf->device_lock);
2662 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); 2662 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2663 spin_unlock_irq(&conf->device_lock); 2663 spin_unlock_irq(&conf->device_lock);
2664 md_wakeup_thread(conf->mddev->thread); 2664 md_wakeup_thread(conf->mddev->thread);
2665 } else 2665 } else {
2666 if (test_bit(R10BIO_WriteError,
2667 &r10_bio->state))
2668 close_write(r10_bio);
2666 raid_end_bio_io(r10_bio); 2669 raid_end_bio_io(r10_bio);
2670 }
2667 } 2671 }
2668} 2672}
2669 2673
@@ -2691,6 +2695,12 @@ static void raid10d(struct md_thread *thread)
2691 r10_bio = list_first_entry(&tmp, struct r10bio, 2695 r10_bio = list_first_entry(&tmp, struct r10bio,
2692 retry_list); 2696 retry_list);
2693 list_del(&r10_bio->retry_list); 2697 list_del(&r10_bio->retry_list);
2698 if (mddev->degraded)
2699 set_bit(R10BIO_Degraded, &r10_bio->state);
2700
2701 if (test_bit(R10BIO_WriteError,
2702 &r10_bio->state))
2703 close_write(r10_bio);
2694 raid_end_bio_io(r10_bio); 2704 raid_end_bio_io(r10_bio);
2695 } 2705 }
2696 } 2706 }
@@ -3387,7 +3397,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3387 disks = mddev->raid_disks + mddev->delta_disks; 3397 disks = mddev->raid_disks + mddev->delta_disks;
3388 break; 3398 break;
3389 } 3399 }
3390 if (layout >> 18) 3400 if (layout >> 19)
3391 return -1; 3401 return -1;
3392 if (chunk < (PAGE_SIZE >> 9) || 3402 if (chunk < (PAGE_SIZE >> 9) ||
3393 !is_power_of_2(chunk)) 3403 !is_power_of_2(chunk))
@@ -3399,7 +3409,22 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3399 geo->near_copies = nc; 3409 geo->near_copies = nc;
3400 geo->far_copies = fc; 3410 geo->far_copies = fc;
3401 geo->far_offset = fo; 3411 geo->far_offset = fo;
3402 geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks; 3412 switch (layout >> 17) {
3413 case 0: /* original layout. simple but not always optimal */
3414 geo->far_set_size = disks;
3415 break;
3416 case 1: /* "improved" layout which was buggy. Hopefully no-one is
3417 * actually using this, but leave code here just in case.*/
3418 geo->far_set_size = disks/fc;
3419 WARN(geo->far_set_size < fc,
3420 "This RAID10 layout does not provide data safety - please backup and create new array\n");
3421 break;
3422 case 2: /* "improved" layout fixed to match documentation */
3423 geo->far_set_size = fc * nc;
3424 break;
3425 default: /* Not a valid layout */
3426 return -1;
3427 }
3403 geo->chunk_mask = chunk - 1; 3428 geo->chunk_mask = chunk - 1;
3404 geo->chunk_shift = ffz(~chunk); 3429 geo->chunk_shift = ffz(~chunk);
3405 return nc*fc; 3430 return nc*fc;
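
setup_geo() now decodes two layout bits instead of one: (layout >> 17) selects the original far layout (one set spanning all disks), the old "improved" layout that divided the disks by far_copies, or the fixed variant where a set is far_copies * near_copies devices; anything above bit 18 is rejected. A standalone illustration of just that selection, with made-up geometry values:

#include <stdio.h>

static int far_set_size(unsigned int layout_bits, int disks, int fc, int nc)
{
        switch (layout_bits) {          /* layout >> 17 in setup_geo() */
        case 0: /* original layout: one set spans every disk */
                return disks;
        case 1: /* old "improved" layout, kept only for existing arrays */
                return disks / fc;
        case 2: /* fixed layout matching the documentation */
                return fc * nc;
        default:
                return -1;              /* not a valid layout */
        }
}

int main(void)
{
        int disks = 6, fc = 2, nc = 1;  /* example geometry */
        unsigned int bits;

        for (bits = 0; bits <= 3; bits++)
                printf("layout bits %u -> far_set_size %d\n",
                       bits, far_set_size(bits, disks, fc, nc));

        return 0;
}
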
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 49bb8d3ff9be..45933c160697 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3499,6 +3499,7 @@ returnbi:
3499 } 3499 }
3500 if (!discard_pending && 3500 if (!discard_pending &&
3501 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3501 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
3502 int hash;
3502 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3503 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
3503 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3504 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
3504 if (sh->qd_idx >= 0) { 3505 if (sh->qd_idx >= 0) {
@@ -3512,16 +3513,17 @@ returnbi:
3512 * no updated data, so remove it from hash list and the stripe 3513 * no updated data, so remove it from hash list and the stripe
3513 * will be reinitialized 3514 * will be reinitialized
3514 */ 3515 */
3515 spin_lock_irq(&conf->device_lock);
3516unhash: 3516unhash:
3517 hash = sh->hash_lock_index;
3518 spin_lock_irq(conf->hash_locks + hash);
3517 remove_hash(sh); 3519 remove_hash(sh);
3520 spin_unlock_irq(conf->hash_locks + hash);
3518 if (head_sh->batch_head) { 3521 if (head_sh->batch_head) {
3519 sh = list_first_entry(&sh->batch_list, 3522 sh = list_first_entry(&sh->batch_list,
3520 struct stripe_head, batch_list); 3523 struct stripe_head, batch_list);
3521 if (sh != head_sh) 3524 if (sh != head_sh)
3522 goto unhash; 3525 goto unhash;
3523 } 3526 }
3524 spin_unlock_irq(&conf->device_lock);
3525 sh = head_sh; 3527 sh = head_sh;
3526 3528
3527 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3529 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
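
The raid5 hunk swaps the global device_lock for the per-bucket hash lock that actually protects the stripe hash table, so remove_hash() only serialises against its own bucket. The general shape of that per-bucket locking, with illustrative names (each lock would be spin_lock_init()ed at setup time):

#include <linux/list.h>
#include <linux/spinlock.h>

#define EXAMPLE_NR_LOCKS 8

struct example_table {
        spinlock_t hash_locks[EXAMPLE_NR_LOCKS];
        struct hlist_head buckets[EXAMPLE_NR_LOCKS];
};

struct example_entry {
        struct hlist_node hash;
        unsigned int hash_lock_index;   /* which bucket/lock this entry uses */
};

static void example_remove(struct example_table *t, struct example_entry *e)
{
        unsigned int h = e->hash_lock_index;

        /* lock only the bucket the entry lives in, not the whole table */
        spin_lock_irq(&t->hash_locks[h]);
        hlist_del_init(&e->hash);
        spin_unlock_irq(&t->hash_locks[h]);
}
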
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
index b055319d532e..c1e2d1834b78 100644
--- a/drivers/media/dvb-frontends/horus3a.h
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -46,8 +46,8 @@ extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
46 const struct horus3a_config *config, 46 const struct horus3a_config *config,
47 struct i2c_adapter *i2c); 47 struct i2c_adapter *i2c);
48#else 48#else
49static inline struct dvb_frontend *horus3a_attach( 49static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
50 const struct cxd2820r_config *config, 50 const struct horus3a_config *config,
51 struct i2c_adapter *i2c) 51 struct i2c_adapter *i2c)
52{ 52{
53 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 53 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h
index 69f30e21f6b3..1f329ef05acc 100644
--- a/drivers/media/dvb-frontends/lnbh25.h
+++ b/drivers/media/dvb-frontends/lnbh25.h
@@ -43,7 +43,7 @@ struct dvb_frontend *lnbh25_attach(
43 struct lnbh25_config *cfg, 43 struct lnbh25_config *cfg,
44 struct i2c_adapter *i2c); 44 struct i2c_adapter *i2c);
45#else 45#else
46static inline dvb_frontend *lnbh25_attach( 46static inline struct dvb_frontend *lnbh25_attach(
47 struct dvb_frontend *fe, 47 struct dvb_frontend *fe,
48 struct lnbh25_config *cfg, 48 struct lnbh25_config *cfg,
49 struct i2c_adapter *i2c) 49 struct i2c_adapter *i2c)
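
The two header fixes above correct copy-paste mistakes in the disabled-driver stubs: the inline fallback must have exactly the same signature as the real attach() so that callers compile whether or not the driver is reachable. The usual shape of that pattern, with a made-up driver name and Kconfig symbol:

#include <linux/kconfig.h>
#include <linux/printk.h>

struct dvb_frontend;
struct i2c_adapter;
struct example_config;

#if IS_REACHABLE(CONFIG_DVB_EXAMPLE)
extern struct dvb_frontend *example_attach(struct dvb_frontend *fe,
                                           const struct example_config *cfg,
                                           struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *example_attach(struct dvb_frontend *fe,
                                           const struct example_config *cfg,
                                           struct i2c_adapter *i2c)
{
        pr_warn("%s: driver disabled by Kconfig\n", __func__);
        return NULL;
}
#endif
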
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index ff31e7a01ca9..feeeb70d841e 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -18,6 +18,27 @@
18 18
19static struct dvb_frontend_ops m88ds3103_ops; 19static struct dvb_frontend_ops m88ds3103_ops;
20 20
21/* write single register with mask */
22static int m88ds3103_update_bits(struct m88ds3103_dev *dev,
23 u8 reg, u8 mask, u8 val)
24{
25 int ret;
26 u8 tmp;
27
28 /* no need for read if whole reg is written */
29 if (mask != 0xff) {
30 ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1);
31 if (ret)
32 return ret;
33
34 val &= mask;
35 tmp &= ~mask;
36 val |= tmp;
37 }
38
39 return regmap_bulk_write(dev->regmap, reg, &val, 1);
40}
41
21/* write reg val table using reg addr auto increment */ 42/* write reg val table using reg addr auto increment */
22static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev, 43static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev,
23 const struct m88ds3103_reg_val *tab, int tab_len) 44 const struct m88ds3103_reg_val *tab, int tab_len)
@@ -394,10 +415,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
394 u8tmp2 = 0x00; /* 0b00 */ 415 u8tmp2 = 0x00; /* 0b00 */
395 break; 416 break;
396 } 417 }
397 ret = regmap_update_bits(dev->regmap, 0x22, 0xc0, u8tmp1 << 6); 418 ret = m88ds3103_update_bits(dev, 0x22, 0xc0, u8tmp1 << 6);
398 if (ret) 419 if (ret)
399 goto err; 420 goto err;
400 ret = regmap_update_bits(dev->regmap, 0x24, 0xc0, u8tmp2 << 6); 421 ret = m88ds3103_update_bits(dev, 0x24, 0xc0, u8tmp2 << 6);
401 if (ret) 422 if (ret)
402 goto err; 423 goto err;
403 } 424 }
@@ -455,13 +476,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
455 if (ret) 476 if (ret)
456 goto err; 477 goto err;
457 } 478 }
458 ret = regmap_update_bits(dev->regmap, 0x9d, 0x08, 0x08); 479 ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08);
459 if (ret) 480 if (ret)
460 goto err; 481 goto err;
461 ret = regmap_write(dev->regmap, 0xf1, 0x01); 482 ret = regmap_write(dev->regmap, 0xf1, 0x01);
462 if (ret) 483 if (ret)
463 goto err; 484 goto err;
464 ret = regmap_update_bits(dev->regmap, 0x30, 0x80, 0x80); 485 ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80);
465 if (ret) 486 if (ret)
466 goto err; 487 goto err;
467 } 488 }
@@ -498,7 +519,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
498 switch (dev->cfg->ts_mode) { 519 switch (dev->cfg->ts_mode) {
499 case M88DS3103_TS_SERIAL: 520 case M88DS3103_TS_SERIAL:
500 case M88DS3103_TS_SERIAL_D7: 521 case M88DS3103_TS_SERIAL_D7:
501 ret = regmap_update_bits(dev->regmap, 0x29, 0x20, u8tmp1); 522 ret = m88ds3103_update_bits(dev, 0x29, 0x20, u8tmp1);
502 if (ret) 523 if (ret)
503 goto err; 524 goto err;
504 u8tmp1 = 0; 525 u8tmp1 = 0;
@@ -567,11 +588,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
567 if (ret) 588 if (ret)
568 goto err; 589 goto err;
569 590
570 ret = regmap_update_bits(dev->regmap, 0x4d, 0x02, dev->cfg->spec_inv << 1); 591 ret = m88ds3103_update_bits(dev, 0x4d, 0x02, dev->cfg->spec_inv << 1);
571 if (ret) 592 if (ret)
572 goto err; 593 goto err;
573 594
574 ret = regmap_update_bits(dev->regmap, 0x30, 0x10, dev->cfg->agc_inv << 4); 595 ret = m88ds3103_update_bits(dev, 0x30, 0x10, dev->cfg->agc_inv << 4);
575 if (ret) 596 if (ret)
576 goto err; 597 goto err;
577 598
@@ -625,13 +646,13 @@ static int m88ds3103_init(struct dvb_frontend *fe)
625 dev->warm = false; 646 dev->warm = false;
626 647
627 /* wake up device from sleep */ 648 /* wake up device from sleep */
628 ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x01); 649 ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x01);
629 if (ret) 650 if (ret)
630 goto err; 651 goto err;
631 ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x00); 652 ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x00);
632 if (ret) 653 if (ret)
633 goto err; 654 goto err;
634 ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x00); 655 ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x00);
635 if (ret) 656 if (ret)
636 goto err; 657 goto err;
637 658
@@ -749,18 +770,18 @@ static int m88ds3103_sleep(struct dvb_frontend *fe)
749 utmp = 0x29; 770 utmp = 0x29;
750 else 771 else
751 utmp = 0x27; 772 utmp = 0x27;
752 ret = regmap_update_bits(dev->regmap, utmp, 0x01, 0x00); 773 ret = m88ds3103_update_bits(dev, utmp, 0x01, 0x00);
753 if (ret) 774 if (ret)
754 goto err; 775 goto err;
755 776
756 /* sleep */ 777 /* sleep */
757 ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); 778 ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
758 if (ret) 779 if (ret)
759 goto err; 780 goto err;
760 ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); 781 ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
761 if (ret) 782 if (ret)
762 goto err; 783 goto err;
763 ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); 784 ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
764 if (ret) 785 if (ret)
765 goto err; 786 goto err;
766 787
@@ -992,12 +1013,12 @@ static int m88ds3103_set_tone(struct dvb_frontend *fe,
992 } 1013 }
993 1014
994 utmp = tone << 7 | dev->cfg->envelope_mode << 5; 1015 utmp = tone << 7 | dev->cfg->envelope_mode << 5;
995 ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1016 ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
996 if (ret) 1017 if (ret)
997 goto err; 1018 goto err;
998 1019
999 utmp = 1 << 2; 1020 utmp = 1 << 2;
1000 ret = regmap_update_bits(dev->regmap, 0xa1, reg_a1_mask, utmp); 1021 ret = m88ds3103_update_bits(dev, 0xa1, reg_a1_mask, utmp);
1001 if (ret) 1022 if (ret)
1002 goto err; 1023 goto err;
1003 1024
@@ -1047,7 +1068,7 @@ static int m88ds3103_set_voltage(struct dvb_frontend *fe,
1047 voltage_dis ^= dev->cfg->lnb_en_pol; 1068 voltage_dis ^= dev->cfg->lnb_en_pol;
1048 1069
1049 utmp = voltage_dis << 1 | voltage_sel << 0; 1070 utmp = voltage_dis << 1 | voltage_sel << 0;
1050 ret = regmap_update_bits(dev->regmap, 0xa2, 0x03, utmp); 1071 ret = m88ds3103_update_bits(dev, 0xa2, 0x03, utmp);
1051 if (ret) 1072 if (ret)
1052 goto err; 1073 goto err;
1053 1074
@@ -1080,7 +1101,7 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
1080 } 1101 }
1081 1102
1082 utmp = dev->cfg->envelope_mode << 5; 1103 utmp = dev->cfg->envelope_mode << 5;
1083 ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1104 ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
1084 if (ret) 1105 if (ret)
1085 goto err; 1106 goto err;
1086 1107
@@ -1115,12 +1136,12 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
1115 } else { 1136 } else {
1116 dev_dbg(&client->dev, "diseqc tx timeout\n"); 1137 dev_dbg(&client->dev, "diseqc tx timeout\n");
1117 1138
1118 ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); 1139 ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
1119 if (ret) 1140 if (ret)
1120 goto err; 1141 goto err;
1121 } 1142 }
1122 1143
1123 ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); 1144 ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
1124 if (ret) 1145 if (ret)
1125 goto err; 1146 goto err;
1126 1147
@@ -1152,7 +1173,7 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
1152 } 1173 }
1153 1174
1154 utmp = dev->cfg->envelope_mode << 5; 1175 utmp = dev->cfg->envelope_mode << 5;
1155 ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1176 ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
1156 if (ret) 1177 if (ret)
1157 goto err; 1178 goto err;
1158 1179
@@ -1194,12 +1215,12 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
1194 } else { 1215 } else {
1195 dev_dbg(&client->dev, "diseqc tx timeout\n"); 1216 dev_dbg(&client->dev, "diseqc tx timeout\n");
1196 1217
1197 ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); 1218 ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
1198 if (ret) 1219 if (ret)
1199 goto err; 1220 goto err;
1200 } 1221 }
1201 1222
1202 ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); 1223 ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
1203 if (ret) 1224 if (ret)
1204 goto err; 1225 goto err;
1205 1226
@@ -1435,13 +1456,13 @@ static int m88ds3103_probe(struct i2c_client *client,
1435 goto err_kfree; 1456 goto err_kfree;
1436 1457
1437 /* sleep */ 1458 /* sleep */
1438 ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); 1459 ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
1439 if (ret) 1460 if (ret)
1440 goto err_kfree; 1461 goto err_kfree;
1441 ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); 1462 ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
1442 if (ret) 1463 if (ret)
1443 goto err_kfree; 1464 goto err_kfree;
1444 ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); 1465 ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
1445 if (ret) 1466 if (ret)
1446 goto err_kfree; 1467 goto err_kfree;
1447 1468
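
The new m88ds3103_update_bits() helper open-codes the usual read-modify-write: read the register, keep only the bits outside the mask, merge in the masked new value and write it back (the read is skipped when the mask covers the whole register). A standalone demonstration of the masking arithmetic, with arbitrary register values:

#include <stdint.h>
#include <stdio.h>

static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
        val &= mask;            /* only the bits selected by the mask */
        old &= ~mask;           /* preserve everything outside the mask */
        return old | val;
}

int main(void)
{
        uint8_t reg = 0x9a;     /* pretend this was read back from the chip */

        /* replace bits 7:6 with 0b01, leave bits 5:0 untouched */
        printf("before: 0x%02x\n", reg);
        printf("after : 0x%02x\n", update_bits(reg, 0xc0, 0x1 << 6));

        return 0;
}
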
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 81788c5a44d8..821a8f481507 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -502,6 +502,10 @@ static int si2168_init(struct dvb_frontend *fe)
502 /* firmware is in the new format */ 502 /* firmware is in the new format */
503 for (remaining = fw->size; remaining > 0; remaining -= 17) { 503 for (remaining = fw->size; remaining > 0; remaining -= 17) {
504 len = fw->data[fw->size - remaining]; 504 len = fw->data[fw->size - remaining];
505 if (len > SI2168_ARGLEN) {
506 ret = -EINVAL;
507 break;
508 }
505 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); 509 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
506 cmd.wlen = len; 510 cmd.wlen = len;
507 cmd.rlen = 1; 511 cmd.rlen = 1;
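
The si2168 firmware loop (and the si2157 loop further down) walks 17-byte records whose first byte is the payload length; the new check refuses a length larger than the fixed command buffer before the memcpy() into it. A hedged standalone sketch of that bounded parse; the record size, buffer size and sample data are made up, and the sketch additionally checks that the payload fits inside the remaining data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RECORD_LEN 17           /* 1 length byte + up to 16 payload bytes */
#define ARGLEN     16           /* size of the command buffer in this sketch */

static int parse_records(const uint8_t *data, size_t size)
{
        uint8_t args[ARGLEN];
        long remaining;

        for (remaining = (long)size; remaining > 0; remaining -= RECORD_LEN) {
                uint8_t len = data[size - remaining];

                /* reject records that would overflow the command buffer */
                if (len > sizeof(args) || len >= remaining)
                        return -1;

                memcpy(args, &data[(size - remaining) + 1], len);
                /* ... hand 'args'/'len' to the device here ... */
        }

        return 0;
}

int main(void)
{
        uint8_t fw[RECORD_LEN * 2] = { 3, 0xc0, 0x12, 0x34 };  /* rest zero */

        return parse_records(fw, sizeof(fw)) ? 1 : 0;
}
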
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
index f55b3276f28d..56773f3893d4 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
@@ -80,11 +80,9 @@ irqreturn_t netup_spi_interrupt(struct netup_spi *spi)
80 u16 reg; 80 u16 reg;
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (!spi) { 83 if (!spi)
84 dev_dbg(&spi->master->dev,
85 "%s(): SPI not initialized\n", __func__);
86 return IRQ_NONE; 84 return IRQ_NONE;
87 } 85
88 spin_lock_irqsave(&spi->lock, flags); 86 spin_lock_irqsave(&spi->lock, flags);
89 reg = readw(&spi->regs->control_stat); 87 reg = readw(&spi->regs->control_stat);
90 if (!(reg & NETUP_SPI_CTRL_IRQ)) { 88 if (!(reg & NETUP_SPI_CTRL_IRQ)) {
@@ -234,11 +232,9 @@ void netup_spi_release(struct netup_unidvb_dev *ndev)
234 unsigned long flags; 232 unsigned long flags;
235 struct netup_spi *spi = ndev->spi; 233 struct netup_spi *spi = ndev->spi;
236 234
237 if (!spi) { 235 if (!spi)
238 dev_dbg(&spi->master->dev,
239 "%s(): SPI not initialized\n", __func__);
240 return; 236 return;
241 } 237
242 spin_lock_irqsave(&spi->lock, flags); 238 spin_lock_irqsave(&spi->lock, flags);
243 reg = readw(&spi->regs->control_stat); 239 reg = readw(&spi->regs->control_stat);
244 writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); 240 writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 486aef50d99b..f922f2e827bc 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -1097,7 +1097,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context)
1097 Elf32_Ehdr *ehdr; 1097 Elf32_Ehdr *ehdr;
1098 Elf32_Phdr *phdr; 1098 Elf32_Phdr *phdr;
1099 u8 __iomem *dst; 1099 u8 __iomem *dst;
1100 int err, i; 1100 int err = 0, i;
1101 1101
1102 if (!fw || !context) 1102 if (!fw || !context)
1103 return -EINVAL; 1103 return -EINVAL;
@@ -1106,7 +1106,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context)
1106 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff); 1106 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
1107 1107
1108 /* go through the available ELF segments */ 1108 /* go through the available ELF segments */
1109 for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) { 1109 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1110 1110
1111 /* Only consider LOAD segments */ 1111 /* Only consider LOAD segments */
1112 if (phdr->p_type != PT_LOAD) 1112 if (phdr->p_type != PT_LOAD)
@@ -1192,7 +1192,6 @@ err:
1192 1192
1193static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) 1193static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
1194{ 1194{
1195 int ret;
1196 int err; 1195 int err;
1197 1196
1198 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA); 1197 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
@@ -1207,7 +1206,7 @@ static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
1207 if (err) { 1206 if (err) {
1208 dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err); 1207 dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
1209 complete_all(&fei->fw_ack); 1208 complete_all(&fei->fw_ack);
1210 return ret; 1209 return err;
1211 } 1210 }
1212 1211
1213 return 0; 1212 return 0;
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 1c087cb76815..d0549fba711c 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -257,7 +257,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
257 goto clkerr; 257 goto clkerr;
258 258
259 if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt, 259 if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt,
260 IRQF_NO_SUSPEND, pdev->name, priv) < 0) { 260 0, pdev->name, priv) < 0) {
261 dev_err(dev, "IRQ %d register failed\n", priv->irq); 261 dev_err(dev, "IRQ %d register failed\n", priv->irq);
262 ret = -EINVAL; 262 ret = -EINVAL;
263 goto regerr; 263 goto regerr;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 507382160e5e..ce157edd45fa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -166,6 +166,10 @@ static int si2157_init(struct dvb_frontend *fe)
166 166
167 for (remaining = fw->size; remaining > 0; remaining -= 17) { 167 for (remaining = fw->size; remaining > 0; remaining -= 17) {
168 len = fw->data[fw->size - remaining]; 168 len = fw->data[fw->size - remaining];
169 if (len > SI2157_ARGLEN) {
170 dev_err(&client->dev, "Bad firmware length\n");
171 goto err_release_firmware;
172 }
169 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); 173 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
170 cmd.wlen = len; 174 cmd.wlen = len;
171 cmd.rlen = 1; 175 cmd.rlen = 1;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c3cac4c12fb3..197a4f2e54d2 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -34,6 +34,14 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
34 unsigned int pipe; 34 unsigned int pipe;
35 u8 requesttype; 35 u8 requesttype;
36 36
37 mutex_lock(&d->usb_mutex);
38
39 if (req->size > sizeof(dev->buf)) {
40 dev_err(&d->intf->dev, "too large message %u\n", req->size);
41 ret = -EINVAL;
42 goto err_mutex_unlock;
43 }
44
37 if (req->index & CMD_WR_FLAG) { 45 if (req->index & CMD_WR_FLAG) {
38 /* write */ 46 /* write */
39 memcpy(dev->buf, req->data, req->size); 47 memcpy(dev->buf, req->data, req->size);
@@ -50,14 +58,17 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
50 dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value, 58 dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value,
51 req->index, dev->buf, req->size); 59 req->index, dev->buf, req->size);
52 if (ret < 0) 60 if (ret < 0)
53 goto err; 61 goto err_mutex_unlock;
54 62
55 /* read request, copy returned data to return buf */ 63 /* read request, copy returned data to return buf */
56 if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN)) 64 if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN))
57 memcpy(req->data, dev->buf, req->size); 65 memcpy(req->data, dev->buf, req->size);
58 66
67 mutex_unlock(&d->usb_mutex);
68
59 return 0; 69 return 0;
60err: 70err_mutex_unlock:
71 mutex_unlock(&d->usb_mutex);
61 dev_dbg(&d->intf->dev, "failed=%d\n", ret); 72 dev_dbg(&d->intf->dev, "failed=%d\n", ret);
62 return ret; 73 return ret;
63} 74}
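
The rtl28xxu change takes d->usb_mutex for the whole control transfer because dev->buf (grown to 128 bytes in the header change just below) is a single bounce buffer shared by every caller, and it rejects oversized requests before copying into that buffer. A minimal sketch of the same "validate, then serialise access to a shared buffer" pattern, with illustrative names:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_dev {
        struct mutex buf_mutex; /* serialises every user of 'buf' */
        u8 buf[128];
};

static int example_xfer(struct example_dev *dev, const void *data, size_t size)
{
        int ret = 0;

        mutex_lock(&dev->buf_mutex);

        if (size > sizeof(dev->buf)) {
                ret = -EINVAL;  /* too large for the bounce buffer */
                goto unlock;
        }

        memcpy(dev->buf, data, size);
        /* ... perform the USB transfer out of dev->buf here ... */

unlock:
        mutex_unlock(&dev->buf_mutex);
        return ret;
}
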
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index 9f6115a2ee01..138062960a73 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -71,7 +71,7 @@
71 71
72 72
73struct rtl28xxu_dev { 73struct rtl28xxu_dev {
74 u8 buf[28]; 74 u8 buf[128];
75 u8 chip_id; 75 u8 chip_id;
76 u8 tuner; 76 u8 tuner;
77 char *tuner_name; 77 char *tuner_name;
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 82876a67f144..9beece00869b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -47,7 +47,7 @@ config V4L2_MEM2MEM_DEV
47# Used by LED subsystem flash drivers 47# Used by LED subsystem flash drivers
48config V4L2_FLASH_LED_CLASS 48config V4L2_FLASH_LED_CLASS
49 tristate "V4L2 flash API for LED flash class devices" 49 tristate "V4L2 flash API for LED flash class devices"
50 depends on VIDEO_V4L2_SUBDEV_API 50 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
51 depends on LEDS_CLASS_FLASH 51 depends on LEDS_CLASS_FLASH
52 ---help--- 52 ---help---
53 Say Y here to enable V4L2 flash API support for LED flash 53 Say Y here to enable V4L2 flash API support for LED flash
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index c6a644b22af4..6f3154613dc7 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -58,12 +58,18 @@ config OMAP_GPMC
58 memory drives like NOR, NAND, OneNAND, SRAM. 58 memory drives like NOR, NAND, OneNAND, SRAM.
59 59
60config OMAP_GPMC_DEBUG 60config OMAP_GPMC_DEBUG
61 bool 61 bool "Enable GPMC debug output and skip reset of GPMC during init"
62 depends on OMAP_GPMC 62 depends on OMAP_GPMC
63 help 63 help
64 Enables verbose debugging mostly to decode the bootloader provided 64 Enables verbose debugging mostly to decode the bootloader provided
65 timings. Enable this during development to configure devices 65 timings. To preserve the bootloader provided timings, the reset
66 connected to the GPMC bus. 66 of GPMC is skipped during init. Enable this during development to
67 configure devices connected to the GPMC bus.
68
69 NOTE: In addition to matching the register setup with the bootloader
70 you also need to match the GPMC FCLK frequency used by the
71 bootloader or else the GPMC timings won't be identical with the
72 bootloader timings.
67 73
68config MVEBU_DEVBUS 74config MVEBU_DEVBUS
69 bool "Marvell EBU Device Bus Controller" 75 bool "Marvell EBU Device Bus Controller"
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 32ac049f2bc4..6515dfc2b805 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -696,7 +696,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
696 int div; 696 int div;
697 u32 l; 697 u32 l;
698 698
699 gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings");
700 div = gpmc_calc_divider(t->sync_clk); 699 div = gpmc_calc_divider(t->sync_clk);
701 if (div < 0) 700 if (div < 0)
702 return div; 701 return div;
@@ -1988,6 +1987,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1988 if (ret < 0) 1987 if (ret < 0)
1989 goto err; 1988 goto err;
1990 1989
1990 gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
1991 ret = gpmc_cs_program_settings(cs, &gpmc_s); 1991 ret = gpmc_cs_program_settings(cs, &gpmc_s);
1992 if (ret < 0) 1992 if (ret < 0)
1993 goto err; 1993 goto err;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c742cfd7674e..23b6c8e8701c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -387,6 +387,24 @@ out:
387 return ERR_PTR(err); 387 return ERR_PTR(err);
388} 388}
389 389
390static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
391 struct mmc_blk_ioc_data *idata)
392{
393 struct mmc_ioc_cmd *ic = &idata->ic;
394
395 if (copy_to_user(&(ic_ptr->response), ic->response,
396 sizeof(ic->response)))
397 return -EFAULT;
398
399 if (!idata->ic.write_flag) {
400 if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
401 idata->buf, idata->buf_bytes))
402 return -EFAULT;
403 }
404
405 return 0;
406}
407
390static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, 408static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
391 u32 retries_max) 409 u32 retries_max)
392{ 410{
@@ -447,12 +465,9 @@ out:
447 return err; 465 return err;
448} 466}
449 467
450static int mmc_blk_ioctl_cmd(struct block_device *bdev, 468static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
451 struct mmc_ioc_cmd __user *ic_ptr) 469 struct mmc_blk_ioc_data *idata)
452{ 470{
453 struct mmc_blk_ioc_data *idata;
454 struct mmc_blk_data *md;
455 struct mmc_card *card;
456 struct mmc_command cmd = {0}; 471 struct mmc_command cmd = {0};
457 struct mmc_data data = {0}; 472 struct mmc_data data = {0};
458 struct mmc_request mrq = {NULL}; 473 struct mmc_request mrq = {NULL};
@@ -461,33 +476,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
461 int is_rpmb = false; 476 int is_rpmb = false;
462 u32 status = 0; 477 u32 status = 0;
463 478
464 /* 479 if (!card || !md || !idata)
465 * The caller must have CAP_SYS_RAWIO, and must be calling this on the 480 return -EINVAL;
466 * whole block device, not on a partition. This prevents overspray
467 * between sibling partitions.
468 */
469 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
470 return -EPERM;
471
472 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
473 if (IS_ERR(idata))
474 return PTR_ERR(idata);
475
476 md = mmc_blk_get(bdev->bd_disk);
477 if (!md) {
478 err = -EINVAL;
479 goto cmd_err;
480 }
481 481
482 if (md->area_type & MMC_BLK_DATA_AREA_RPMB) 482 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
483 is_rpmb = true; 483 is_rpmb = true;
484 484
485 card = md->queue.card;
486 if (IS_ERR(card)) {
487 err = PTR_ERR(card);
488 goto cmd_done;
489 }
490
491 cmd.opcode = idata->ic.opcode; 485 cmd.opcode = idata->ic.opcode;
492 cmd.arg = idata->ic.arg; 486 cmd.arg = idata->ic.arg;
493 cmd.flags = idata->ic.flags; 487 cmd.flags = idata->ic.flags;
@@ -530,23 +524,21 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
530 524
531 mrq.cmd = &cmd; 525 mrq.cmd = &cmd;
532 526
533 mmc_get_card(card);
534
535 err = mmc_blk_part_switch(card, md); 527 err = mmc_blk_part_switch(card, md);
536 if (err) 528 if (err)
537 goto cmd_rel_host; 529 return err;
538 530
539 if (idata->ic.is_acmd) { 531 if (idata->ic.is_acmd) {
540 err = mmc_app_cmd(card->host, card); 532 err = mmc_app_cmd(card->host, card);
541 if (err) 533 if (err)
542 goto cmd_rel_host; 534 return err;
543 } 535 }
544 536
545 if (is_rpmb) { 537 if (is_rpmb) {
546 err = mmc_set_blockcount(card, data.blocks, 538 err = mmc_set_blockcount(card, data.blocks,
547 idata->ic.write_flag & (1 << 31)); 539 idata->ic.write_flag & (1 << 31));
548 if (err) 540 if (err)
549 goto cmd_rel_host; 541 return err;
550 } 542 }
551 543
552 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && 544 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
@@ -557,7 +549,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
557 pr_err("%s: ioctl_do_sanitize() failed. err = %d", 549 pr_err("%s: ioctl_do_sanitize() failed. err = %d",
558 __func__, err); 550 __func__, err);
559 551
560 goto cmd_rel_host; 552 return err;
561 } 553 }
562 554
563 mmc_wait_for_req(card->host, &mrq); 555 mmc_wait_for_req(card->host, &mrq);
@@ -565,14 +557,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
565 if (cmd.error) { 557 if (cmd.error) {
566 dev_err(mmc_dev(card->host), "%s: cmd error %d\n", 558 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
567 __func__, cmd.error); 559 __func__, cmd.error);
568 err = cmd.error; 560 return cmd.error;
569 goto cmd_rel_host;
570 } 561 }
571 if (data.error) { 562 if (data.error) {
572 dev_err(mmc_dev(card->host), "%s: data error %d\n", 563 dev_err(mmc_dev(card->host), "%s: data error %d\n",
573 __func__, data.error); 564 __func__, data.error);
574 err = data.error; 565 return data.error;
575 goto cmd_rel_host;
576 } 566 }
577 567
578 /* 568 /*
@@ -582,18 +572,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
582 if (idata->ic.postsleep_min_us) 572 if (idata->ic.postsleep_min_us)
583 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); 573 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
584 574
585 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { 575 memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
586 err = -EFAULT;
587 goto cmd_rel_host;
588 }
589
590 if (!idata->ic.write_flag) {
591 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
592 idata->buf, idata->buf_bytes)) {
593 err = -EFAULT;
594 goto cmd_rel_host;
595 }
596 }
597 576
598 if (is_rpmb) { 577 if (is_rpmb) {
599 /* 578 /*
@@ -607,24 +586,132 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
607 __func__, status, err); 586 __func__, status, err);
608 } 587 }
609 588
610cmd_rel_host: 589 return err;
590}
591
592static int mmc_blk_ioctl_cmd(struct block_device *bdev,
593 struct mmc_ioc_cmd __user *ic_ptr)
594{
595 struct mmc_blk_ioc_data *idata;
596 struct mmc_blk_data *md;
597 struct mmc_card *card;
598 int err = 0, ioc_err = 0;
599
600 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
601 if (IS_ERR(idata))
602 return PTR_ERR(idata);
603
604 md = mmc_blk_get(bdev->bd_disk);
605 if (!md) {
606 err = -EINVAL;
607 goto cmd_err;
608 }
609
610 card = md->queue.card;
611 if (IS_ERR(card)) {
612 err = PTR_ERR(card);
613 goto cmd_done;
614 }
615
616 mmc_get_card(card);
617
618 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
619
611 mmc_put_card(card); 620 mmc_put_card(card);
612 621
622 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
623
613cmd_done: 624cmd_done:
614 mmc_blk_put(md); 625 mmc_blk_put(md);
615cmd_err: 626cmd_err:
616 kfree(idata->buf); 627 kfree(idata->buf);
617 kfree(idata); 628 kfree(idata);
618 return err; 629 return ioc_err ? ioc_err : err;
630}
631
632static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
633 struct mmc_ioc_multi_cmd __user *user)
634{
635 struct mmc_blk_ioc_data **idata = NULL;
636 struct mmc_ioc_cmd __user *cmds = user->cmds;
637 struct mmc_card *card;
638 struct mmc_blk_data *md;
639 int i, err = 0, ioc_err = 0;
640 __u64 num_of_cmds;
641
642 if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
643 sizeof(num_of_cmds)))
644 return -EFAULT;
645
646 if (num_of_cmds > MMC_IOC_MAX_CMDS)
647 return -EINVAL;
648
649 idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
650 if (!idata)
651 return -ENOMEM;
652
653 for (i = 0; i < num_of_cmds; i++) {
654 idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
655 if (IS_ERR(idata[i])) {
656 err = PTR_ERR(idata[i]);
657 num_of_cmds = i;
658 goto cmd_err;
659 }
660 }
661
662 md = mmc_blk_get(bdev->bd_disk);
663 if (!md)
664 goto cmd_err;
665
666 card = md->queue.card;
667 if (IS_ERR(card)) {
668 err = PTR_ERR(card);
669 goto cmd_done;
670 }
671
672 mmc_get_card(card);
673
674 for (i = 0; i < num_of_cmds && !ioc_err; i++)
675 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
676
677 mmc_put_card(card);
678
679 /* copy to user if data and response */
680 for (i = 0; i < num_of_cmds && !err; i++)
681 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
682
683cmd_done:
684 mmc_blk_put(md);
685cmd_err:
686 for (i = 0; i < num_of_cmds; i++) {
687 kfree(idata[i]->buf);
688 kfree(idata[i]);
689 }
690 kfree(idata);
691 return ioc_err ? ioc_err : err;
619} 692}
620 693
621static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, 694static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
622 unsigned int cmd, unsigned long arg) 695 unsigned int cmd, unsigned long arg)
623{ 696{
624 int ret = -EINVAL; 697 /*
625 if (cmd == MMC_IOC_CMD) 698 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
626 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); 699 * whole block device, not on a partition. This prevents overspray
627 return ret; 700 * between sibling partitions.
701 */
702 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
703 return -EPERM;
704
705 switch (cmd) {
706 case MMC_IOC_CMD:
707 return mmc_blk_ioctl_cmd(bdev,
708 (struct mmc_ioc_cmd __user *)arg);
709 case MMC_IOC_MULTI_CMD:
710 return mmc_blk_ioctl_multi_cmd(bdev,
711 (struct mmc_ioc_multi_cmd __user *)arg);
712 default:
713 return -EINVAL;
714 }
628} 715}
629 716
630#ifdef CONFIG_COMPAT 717#ifdef CONFIG_COMPAT
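
The block.c rework splits the old ioctl body into __mmc_blk_ioctl_cmd() so that the new MMC_IOC_MULTI_CMD path can claim the card once, issue every command in the array back to back, and only then copy the responses back to user space. A hedged userspace sketch of how the new ioctl is meant to be driven, assuming the struct mmc_ioc_multi_cmd UAPI from <linux/mmc/ioctl.h> that accompanies this change; the opcode/arg/flags values are placeholders that must be replaced with a real, safe command sequence, and the call needs CAP_SYS_RAWIO on the whole block device:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

int main(void)
{
        const unsigned int n = 2;
        struct mmc_ioc_multi_cmd *multi;
        unsigned int i;
        int fd, ret;

        /* whole device node, not a partition */
        fd = open("/dev/mmcblk0", O_RDWR);
        if (fd < 0)
                return 1;

        /* num_of_cmds followed by a flexible array of struct mmc_ioc_cmd */
        multi = calloc(1, sizeof(*multi) + n * sizeof(struct mmc_ioc_cmd));
        if (!multi) {
                close(fd);
                return 1;
        }

        multi->num_of_cmds = n;
        for (i = 0; i < n; i++) {
                multi->cmds[i].opcode = 0;  /* placeholder: real opcode here */
                multi->cmds[i].arg    = 0;  /* placeholder: real argument */
                multi->cmds[i].flags  = 0;  /* placeholder: response flags */
        }

        ret = ioctl(fd, MMC_IOC_MULTI_CMD, multi);
        printf("MMC_IOC_MULTI_CMD: %d\n", ret);

        free(multi);
        close(fd);
        return ret < 0;
}
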
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b78cf5d403a3..7fc9174d4619 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2263,15 +2263,12 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2263/* 2263/*
2264 * eMMC hardware reset. 2264 * eMMC hardware reset.
2265 */ 2265 */
2266static int mmc_test_hw_reset(struct mmc_test_card *test) 2266static int mmc_test_reset(struct mmc_test_card *test)
2267{ 2267{
2268 struct mmc_card *card = test->card; 2268 struct mmc_card *card = test->card;
2269 struct mmc_host *host = card->host; 2269 struct mmc_host *host = card->host;
2270 int err; 2270 int err;
2271 2271
2272 if (!mmc_card_mmc(card) || !mmc_can_reset(card))
2273 return RESULT_UNSUP_CARD;
2274
2275 err = mmc_hw_reset(host); 2272 err = mmc_hw_reset(host);
2276 if (!err) 2273 if (!err)
2277 return RESULT_OK; 2274 return RESULT_OK;
@@ -2605,8 +2602,8 @@ static const struct mmc_test_case mmc_test_cases[] = {
2605 }, 2602 },
2606 2603
2607 { 2604 {
2608 .name = "eMMC hardware reset", 2605 .name = "Reset test",
2609 .run = mmc_test_hw_reset, 2606 .run = mmc_test_reset,
2610 }, 2607 },
2611}; 2608};
2612 2609
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 9ebee72d9c3f..4c33d7690f2f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -1,13 +1,3 @@
1# 1#
2# MMC core configuration 2# MMC core configuration
3# 3#
4
5config MMC_CLKGATE
6 bool "MMC host clock gating"
7 help
8 This will attempt to aggressively gate the clock to the MMC card.
9 This is done to save power due to gating off the logic and bus
10 noise when the MMC card is not in use. Your host driver has to
11 support handling this in order for it to be of any use.
12
13 If unsure, say N.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a3eb20bdcd97..5ae89e48fd85 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -187,8 +187,6 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
187 187
188 if (mrq->done) 188 if (mrq->done)
189 mrq->done(mrq); 189 mrq->done(mrq);
190
191 mmc_host_clk_release(host);
192 } 190 }
193} 191}
194 192
@@ -206,6 +204,23 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
206 return; 204 return;
207 } 205 }
208 206
207 /*
208 * For sdio rw commands we must wait for card busy otherwise some
209 * sdio devices won't work properly.
210 */
211 if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
 212 int tries = 500; /* Wait approx 500ms at maximum */
213
214 while (host->ops->card_busy(host) && --tries)
215 mmc_delay(1);
216
217 if (tries == 0) {
218 mrq->cmd->error = -EBUSY;
219 mmc_request_done(host, mrq);
220 return;
221 }
222 }
223
209 host->ops->request(host, mrq); 224 host->ops->request(host, mrq);
210} 225}
211 226
@@ -275,7 +290,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
275 mrq->stop->mrq = mrq; 290 mrq->stop->mrq = mrq;
276 } 291 }
277 } 292 }
278 mmc_host_clk_hold(host);
279 led_trigger_event(host->led, LED_FULL); 293 led_trigger_event(host->led, LED_FULL);
280 __mmc_start_request(host, mrq); 294 __mmc_start_request(host, mrq);
281 295
@@ -525,11 +539,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
525static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, 539static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
526 bool is_first_req) 540 bool is_first_req)
527{ 541{
528 if (host->ops->pre_req) { 542 if (host->ops->pre_req)
529 mmc_host_clk_hold(host);
530 host->ops->pre_req(host, mrq, is_first_req); 543 host->ops->pre_req(host, mrq, is_first_req);
531 mmc_host_clk_release(host);
532 }
533} 544}
534 545
535/** 546/**
@@ -544,11 +555,8 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
544static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, 555static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
545 int err) 556 int err)
546{ 557{
547 if (host->ops->post_req) { 558 if (host->ops->post_req)
548 mmc_host_clk_hold(host);
549 host->ops->post_req(host, mrq, err); 559 host->ops->post_req(host, mrq, err);
550 mmc_host_clk_release(host);
551 }
552} 560}
553 561
554/** 562/**
@@ -833,9 +841,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
833 unsigned int timeout_us, limit_us; 841 unsigned int timeout_us, limit_us;
834 842
835 timeout_us = data->timeout_ns / 1000; 843 timeout_us = data->timeout_ns / 1000;
836 if (mmc_host_clk_rate(card->host)) 844 if (card->host->ios.clock)
837 timeout_us += data->timeout_clks * 1000 / 845 timeout_us += data->timeout_clks * 1000 /
838 (mmc_host_clk_rate(card->host) / 1000); 846 (card->host->ios.clock / 1000);
839 847
840 if (data->flags & MMC_DATA_WRITE) 848 if (data->flags & MMC_DATA_WRITE)
841 /* 849 /*
@@ -1033,8 +1041,6 @@ static inline void mmc_set_ios(struct mmc_host *host)
1033 ios->power_mode, ios->chip_select, ios->vdd, 1041 ios->power_mode, ios->chip_select, ios->vdd,
1034 ios->bus_width, ios->timing); 1042 ios->bus_width, ios->timing);
1035 1043
1036 if (ios->clock > 0)
1037 mmc_set_ungated(host);
1038 host->ops->set_ios(host, ios); 1044 host->ops->set_ios(host, ios);
1039} 1045}
1040 1046
@@ -1043,17 +1049,15 @@ static inline void mmc_set_ios(struct mmc_host *host)
1043 */ 1049 */
1044void mmc_set_chip_select(struct mmc_host *host, int mode) 1050void mmc_set_chip_select(struct mmc_host *host, int mode)
1045{ 1051{
1046 mmc_host_clk_hold(host);
1047 host->ios.chip_select = mode; 1052 host->ios.chip_select = mode;
1048 mmc_set_ios(host); 1053 mmc_set_ios(host);
1049 mmc_host_clk_release(host);
1050} 1054}
1051 1055
1052/* 1056/*
1053 * Sets the host clock to the highest possible frequency that 1057 * Sets the host clock to the highest possible frequency that
1054 * is below "hz". 1058 * is below "hz".
1055 */ 1059 */
1056static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) 1060void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1057{ 1061{
1058 WARN_ON(hz && hz < host->f_min); 1062 WARN_ON(hz && hz < host->f_min);
1059 1063
@@ -1064,68 +1068,6 @@ static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
1064 mmc_set_ios(host); 1068 mmc_set_ios(host);
1065} 1069}
1066 1070
1067void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1068{
1069 mmc_host_clk_hold(host);
1070 __mmc_set_clock(host, hz);
1071 mmc_host_clk_release(host);
1072}
1073
1074#ifdef CONFIG_MMC_CLKGATE
1075/*
1076 * This gates the clock by setting it to 0 Hz.
1077 */
1078void mmc_gate_clock(struct mmc_host *host)
1079{
1080 unsigned long flags;
1081
1082 spin_lock_irqsave(&host->clk_lock, flags);
1083 host->clk_old = host->ios.clock;
1084 host->ios.clock = 0;
1085 host->clk_gated = true;
1086 spin_unlock_irqrestore(&host->clk_lock, flags);
1087 mmc_set_ios(host);
1088}
1089
1090/*
1091 * This restores the clock from gating by using the cached
1092 * clock value.
1093 */
1094void mmc_ungate_clock(struct mmc_host *host)
1095{
1096 /*
1097 * We should previously have gated the clock, so the clock shall
1098 * be 0 here! The clock may however be 0 during initialization,
1099 * when some request operations are performed before setting
1100 * the frequency. When ungate is requested in that situation
1101 * we just ignore the call.
1102 */
1103 if (host->clk_old) {
1104 BUG_ON(host->ios.clock);
1105 /* This call will also set host->clk_gated to false */
1106 __mmc_set_clock(host, host->clk_old);
1107 }
1108}
1109
1110void mmc_set_ungated(struct mmc_host *host)
1111{
1112 unsigned long flags;
1113
1114 /*
1115 * We've been given a new frequency while the clock is gated,
1116 * so make sure we regard this as ungating it.
1117 */
1118 spin_lock_irqsave(&host->clk_lock, flags);
1119 host->clk_gated = false;
1120 spin_unlock_irqrestore(&host->clk_lock, flags);
1121}
1122
1123#else
1124void mmc_set_ungated(struct mmc_host *host)
1125{
1126}
1127#endif
1128
1129int mmc_execute_tuning(struct mmc_card *card) 1071int mmc_execute_tuning(struct mmc_card *card)
1130{ 1072{
1131 struct mmc_host *host = card->host; 1073 struct mmc_host *host = card->host;
@@ -1140,9 +1082,7 @@ int mmc_execute_tuning(struct mmc_card *card)
1140 else 1082 else
1141 opcode = MMC_SEND_TUNING_BLOCK; 1083 opcode = MMC_SEND_TUNING_BLOCK;
1142 1084
1143 mmc_host_clk_hold(host);
1144 err = host->ops->execute_tuning(host, opcode); 1085 err = host->ops->execute_tuning(host, opcode);
1145 mmc_host_clk_release(host);
1146 1086
1147 if (err) 1087 if (err)
1148 pr_err("%s: tuning execution failed\n", mmc_hostname(host)); 1088 pr_err("%s: tuning execution failed\n", mmc_hostname(host));
@@ -1157,10 +1097,8 @@ int mmc_execute_tuning(struct mmc_card *card)
1157 */ 1097 */
1158void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 1098void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1159{ 1099{
1160 mmc_host_clk_hold(host);
1161 host->ios.bus_mode = mode; 1100 host->ios.bus_mode = mode;
1162 mmc_set_ios(host); 1101 mmc_set_ios(host);
1163 mmc_host_clk_release(host);
1164} 1102}
1165 1103
1166/* 1104/*
@@ -1168,10 +1106,8 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1168 */ 1106 */
1169void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 1107void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1170{ 1108{
1171 mmc_host_clk_hold(host);
1172 host->ios.bus_width = width; 1109 host->ios.bus_width = width;
1173 mmc_set_ios(host); 1110 mmc_set_ios(host);
1174 mmc_host_clk_release(host);
1175} 1111}
1176 1112
1177/* 1113/*
@@ -1341,6 +1277,40 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1341#ifdef CONFIG_REGULATOR 1277#ifdef CONFIG_REGULATOR
1342 1278
1343/** 1279/**
1280 * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1281 * @vdd_bit: OCR bit number
1282 * @min_uV: minimum voltage value (mV)
1283 * @max_uV: maximum voltage value (mV)
1284 *
1285 * This function returns the voltage range according to the provided OCR
1286 * bit number. If conversion is not possible a negative errno value returned.
1287 */
1288static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1289{
1290 int tmp;
1291
1292 if (!vdd_bit)
1293 return -EINVAL;
1294
1295 /*
1296 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1297 * bits this regulator doesn't quite support ... don't
1298 * be too picky, most cards and regulators are OK with
1299 * a 0.1V range goof (it's a small error percentage).
1300 */
1301 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1302 if (tmp == 0) {
1303 *min_uV = 1650 * 1000;
1304 *max_uV = 1950 * 1000;
1305 } else {
1306 *min_uV = 1900 * 1000 + tmp * 100 * 1000;
1307 *max_uV = *min_uV + 100 * 1000;
1308 }
1309
1310 return 0;
1311}
1312
1313/**
1344 * mmc_regulator_get_ocrmask - return mask of supported voltages 1314 * mmc_regulator_get_ocrmask - return mask of supported voltages
1345 * @supply: regulator to use 1315 * @supply: regulator to use
1346 * 1316 *
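For reference, a standalone sketch (not part of the patch) that reproduces the OCR-bit-to-voltage mapping the new mmc_ocrbitnum_to_vdd() helper above performs. It assumes MMC_VDD_165_195 is OCR bit 7, as defined in include/linux/mmc/host.h, so ilog2(MMC_VDD_165_195) == 7; every bit above 7 then maps to a 0.1 V window starting at 1.9 V plus the bit offset times 0.1 V.

#include <stdio.h>

/* Same mapping as mmc_ocrbitnum_to_vdd(): bit 7 is the 1.65-1.95 V window,
 * every higher bit n maps to 1.9 V + (n - 7) * 0.1 V .. that value + 0.1 V. */
static int ocrbit_to_uv(int vdd_bit, int *min_uv, int *max_uv)
{
	int tmp;

	if (!vdd_bit)
		return -1;

	tmp = vdd_bit - 7;		/* 7 == ilog2(MMC_VDD_165_195) */
	if (tmp == 0) {
		*min_uv = 1650 * 1000;
		*max_uv = 1950 * 1000;
	} else {
		*min_uv = 1900 * 1000 + tmp * 100 * 1000;
		*max_uv = *min_uv + 100 * 1000;
	}
	return 0;
}

int main(void)
{
	int lo, hi;

	ocrbit_to_uv(7, &lo, &hi);	/* MMC_VDD_165_195 -> 1650000..1950000 uV */
	printf("bit  7: %d..%d uV\n", lo, hi);
	ocrbit_to_uv(20, &lo, &hi);	/* MMC_VDD_32_33   -> 3200000..3300000 uV */
	printf("bit 20: %d..%d uV\n", lo, hi);
	return 0;
}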
@@ -1403,22 +1373,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
1403 int min_uV, max_uV; 1373 int min_uV, max_uV;
1404 1374
1405 if (vdd_bit) { 1375 if (vdd_bit) {
1406 int tmp; 1376 mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1407
1408 /*
1409 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1410 * bits this regulator doesn't quite support ... don't
1411 * be too picky, most cards and regulators are OK with
1412 * a 0.1V range goof (it's a small error percentage).
1413 */
1414 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1415 if (tmp == 0) {
1416 min_uV = 1650 * 1000;
1417 max_uV = 1950 * 1000;
1418 } else {
1419 min_uV = 1900 * 1000 + tmp * 100 * 1000;
1420 max_uV = min_uV + 100 * 1000;
1421 }
1422 1377
1423 result = regulator_set_voltage(supply, min_uV, max_uV); 1378 result = regulator_set_voltage(supply, min_uV, max_uV);
1424 if (result == 0 && !mmc->regulator_enabled) { 1379 if (result == 0 && !mmc->regulator_enabled) {
@@ -1439,6 +1394,84 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
1439} 1394}
1440EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); 1395EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1441 1396
1397static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1398 int min_uV, int target_uV,
1399 int max_uV)
1400{
1401 /*
1402 * Check if supported first to avoid errors since we may try several
1403 * signal levels during power up and don't want to show errors.
1404 */
1405 if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1406 return -EINVAL;
1407
1408 return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1409 max_uV);
1410}
1411
1412/**
1413 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1414 *
1415 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1416 * That will match the behavior of old boards where VQMMC and VMMC were supplied
1417 * by the same supply. The Bus Operating conditions for 3.3V signaling in the
1418 * SD card spec also define VQMMC in terms of VMMC.
1419 * If this is not possible we'll try the full 2.7-3.6V of the spec.
1420 *
1421 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1422 * requested voltage. This is definitely a good idea for UHS where there's a
1423 * separate regulator on the card that's trying to make 1.8V and it's best if
1424 * we match.
1425 *
1426 * This function is expected to be used by a controller's
1427 * start_signal_voltage_switch() function.
1428 */
1429int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1430{
1431 struct device *dev = mmc_dev(mmc);
1432 int ret, volt, min_uV, max_uV;
1433
1434 /* If no vqmmc supply then we can't change the voltage */
1435 if (IS_ERR(mmc->supply.vqmmc))
1436 return -EINVAL;
1437
1438 switch (ios->signal_voltage) {
1439 case MMC_SIGNAL_VOLTAGE_120:
1440 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1441 1100000, 1200000, 1300000);
1442 case MMC_SIGNAL_VOLTAGE_180:
1443 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1444 1700000, 1800000, 1950000);
1445 case MMC_SIGNAL_VOLTAGE_330:
1446 ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1447 if (ret < 0)
1448 return ret;
1449
1450 dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1451 __func__, volt, max_uV);
1452
1453 min_uV = max(volt - 300000, 2700000);
1454 max_uV = min(max_uV + 200000, 3600000);
1455
1456 /*
1457 * Due to a limitation in the current implementation of
1458 * regulator_set_voltage_triplet() which is taking the lowest
1459 * voltage possible if below the target, search for a suitable
1460 * voltage in two steps and try to stay close to vmmc
1461 * with a 0.3V tolerance at first.
1462 */
1463 if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1464 min_uV, volt, max_uV))
1465 return 0;
1466
1467 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1468 2700000, volt, 3600000);
1469 default:
1470 return -EINVAL;
1471 }
1472}
1473EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1474
1442#endif /* CONFIG_REGULATOR */ 1475#endif /* CONFIG_REGULATOR */
1443 1476
1444int mmc_regulator_get_supply(struct mmc_host *mmc) 1477int mmc_regulator_get_supply(struct mmc_host *mmc)
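A small standalone sketch (again, not part of the patch) of the voltage windows mmc_regulator_set_vqmmc() requests above: fixed 1.1-1.3 V and 1.7-1.95 V windows for 1.2 V and 1.8 V signaling, and for 3.3 V signaling a window derived from the current VMMC range, clamped to the 2.7-3.6 V span, with the full span kept as a fallback. The tight window is tried first because regulator_set_voltage_triplet() picks the lowest supported voltage below the target, so staying within 0.3 V of VMMC keeps the two supplies matched.

#include <stdio.h>

#define UV(mv)	((mv) * 1000)

/* 3.3 V signaling: first try VMMC - 0.3 V .. VMMC(max) + 0.2 V, clamped to
 * the 2.7-3.6 V spec range and targeting VMMC itself; if the regulator
 * cannot supply that window, fall back to the full 2.7-3.6 V range. */
static void vqmmc_330_window(int vmmc_uv, int vmmc_max_uv)
{
	int min_uv = vmmc_uv - UV(300);
	int max_uv = vmmc_max_uv + UV(200);

	if (min_uv < UV(2700))
		min_uv = UV(2700);
	if (max_uv > UV(3600))
		max_uv = UV(3600);

	printf("3.3V, first try: %d..%d uV, target %d uV\n",
	       min_uv, max_uv, vmmc_uv);
	printf("3.3V, fallback : %d..%d uV, target %d uV\n",
	       UV(2700), UV(3600), vmmc_uv);
}

int main(void)
{
	printf("1.2V signaling : %d..%d uV, target %d uV\n",
	       UV(1100), UV(1300), UV(1200));
	printf("1.8V signaling : %d..%d uV, target %d uV\n",
	       UV(1700), UV(1950), UV(1800));
	/* VMMC range for OCR bit 20 (3.2-3.3 V), as computed above */
	vqmmc_330_window(UV(3200), UV(3300));
	return 0;
}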
@@ -1515,11 +1548,8 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1515 int old_signal_voltage = host->ios.signal_voltage; 1548 int old_signal_voltage = host->ios.signal_voltage;
1516 1549
1517 host->ios.signal_voltage = signal_voltage; 1550 host->ios.signal_voltage = signal_voltage;
1518 if (host->ops->start_signal_voltage_switch) { 1551 if (host->ops->start_signal_voltage_switch)
1519 mmc_host_clk_hold(host);
1520 err = host->ops->start_signal_voltage_switch(host, &host->ios); 1552 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1521 mmc_host_clk_release(host);
1522 }
1523 1553
1524 if (err) 1554 if (err)
1525 host->ios.signal_voltage = old_signal_voltage; 1555 host->ios.signal_voltage = old_signal_voltage;
@@ -1553,20 +1583,17 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1553 pr_warn("%s: cannot verify signal voltage switch\n", 1583 pr_warn("%s: cannot verify signal voltage switch\n",
1554 mmc_hostname(host)); 1584 mmc_hostname(host));
1555 1585
1556 mmc_host_clk_hold(host);
1557
1558 cmd.opcode = SD_SWITCH_VOLTAGE; 1586 cmd.opcode = SD_SWITCH_VOLTAGE;
1559 cmd.arg = 0; 1587 cmd.arg = 0;
1560 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1588 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1561 1589
1562 err = mmc_wait_for_cmd(host, &cmd, 0); 1590 err = mmc_wait_for_cmd(host, &cmd, 0);
1563 if (err) 1591 if (err)
1564 goto err_command; 1592 return err;
1593
1594 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1595 return -EIO;
1565 1596
1566 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
1567 err = -EIO;
1568 goto err_command;
1569 }
1570 /* 1597 /*
1571 * The card should drive cmd and dat[0:3] low immediately 1598 * The card should drive cmd and dat[0:3] low immediately
1572 * after the response of cmd11, but wait 1 ms to be sure 1599 * after the response of cmd11, but wait 1 ms to be sure
@@ -1615,9 +1642,6 @@ power_cycle:
1615 mmc_power_cycle(host, ocr); 1642 mmc_power_cycle(host, ocr);
1616 } 1643 }
1617 1644
1618err_command:
1619 mmc_host_clk_release(host);
1620
1621 return err; 1645 return err;
1622} 1646}
1623 1647
@@ -1626,10 +1650,8 @@ err_command:
1626 */ 1650 */
1627void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1651void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1628{ 1652{
1629 mmc_host_clk_hold(host);
1630 host->ios.timing = timing; 1653 host->ios.timing = timing;
1631 mmc_set_ios(host); 1654 mmc_set_ios(host);
1632 mmc_host_clk_release(host);
1633} 1655}
1634 1656
1635/* 1657/*
@@ -1637,10 +1659,8 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1637 */ 1659 */
1638void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1660void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1639{ 1661{
1640 mmc_host_clk_hold(host);
1641 host->ios.drv_type = drv_type; 1662 host->ios.drv_type = drv_type;
1642 mmc_set_ios(host); 1663 mmc_set_ios(host);
1643 mmc_host_clk_release(host);
1644} 1664}
1645 1665
1646int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, 1666int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -1648,7 +1668,6 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1648{ 1668{
1649 struct mmc_host *host = card->host; 1669 struct mmc_host *host = card->host;
1650 int host_drv_type = SD_DRIVER_TYPE_B; 1670 int host_drv_type = SD_DRIVER_TYPE_B;
1651 int drive_strength;
1652 1671
1653 *drv_type = 0; 1672 *drv_type = 0;
1654 1673
@@ -1671,14 +1690,10 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1671 * information and let the hardware specific code 1690 * information and let the hardware specific code
1672 * return what is possible given the options 1691 * return what is possible given the options
1673 */ 1692 */
1674 mmc_host_clk_hold(host); 1693 return host->ops->select_drive_strength(card, max_dtr,
1675 drive_strength = host->ops->select_drive_strength(card, max_dtr, 1694 host_drv_type,
1676 host_drv_type, 1695 card_drv_type,
1677 card_drv_type, 1696 drv_type);
1678 drv_type);
1679 mmc_host_clk_release(host);
1680
1681 return drive_strength;
1682} 1697}
1683 1698
1684/* 1699/*
@@ -1697,8 +1712,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
1697 if (host->ios.power_mode == MMC_POWER_ON) 1712 if (host->ios.power_mode == MMC_POWER_ON)
1698 return; 1713 return;
1699 1714
1700 mmc_host_clk_hold(host);
1701
1702 mmc_pwrseq_pre_power_on(host); 1715 mmc_pwrseq_pre_power_on(host);
1703 1716
1704 host->ios.vdd = fls(ocr) - 1; 1717 host->ios.vdd = fls(ocr) - 1;
@@ -1732,8 +1745,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
1732 * time required to reach a stable voltage. 1745 * time required to reach a stable voltage.
1733 */ 1746 */
1734 mmc_delay(10); 1747 mmc_delay(10);
1735
1736 mmc_host_clk_release(host);
1737} 1748}
1738 1749
1739void mmc_power_off(struct mmc_host *host) 1750void mmc_power_off(struct mmc_host *host)
@@ -1741,8 +1752,6 @@ void mmc_power_off(struct mmc_host *host)
1741 if (host->ios.power_mode == MMC_POWER_OFF) 1752 if (host->ios.power_mode == MMC_POWER_OFF)
1742 return; 1753 return;
1743 1754
1744 mmc_host_clk_hold(host);
1745
1746 mmc_pwrseq_power_off(host); 1755 mmc_pwrseq_power_off(host);
1747 1756
1748 host->ios.clock = 0; 1757 host->ios.clock = 0;
@@ -1758,8 +1767,6 @@ void mmc_power_off(struct mmc_host *host)
1758 * can be successfully turned on again. 1767 * can be successfully turned on again.
1759 */ 1768 */
1760 mmc_delay(1); 1769 mmc_delay(1);
1761
1762 mmc_host_clk_release(host);
1763} 1770}
1764 1771
1765void mmc_power_cycle(struct mmc_host *host, u32 ocr) 1772void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -1975,7 +1982,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1975 */ 1982 */
1976 timeout_clks <<= 1; 1983 timeout_clks <<= 1;
1977 timeout_us += (timeout_clks * 1000) / 1984 timeout_us += (timeout_clks * 1000) /
1978 (mmc_host_clk_rate(card->host) / 1000); 1985 (card->host->ios.clock / 1000);
1979 1986
1980 erase_timeout = timeout_us / 1000; 1987 erase_timeout = timeout_us / 1000;
1981 1988
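A quick worked example (not part of the patch) of the clock-cycle to microsecond conversion used here and in mmc_set_data_timeout(), which now reads card->host->ios.clock directly instead of going through the removed mmc_host_clk_rate() helper:

#include <stdio.h>

int main(void)
{
	unsigned int clock_hz = 50000000;	/* ios.clock, e.g. 50 MHz */
	unsigned int timeout_clks = 25000;	/* timeout expressed in card clock cycles */
	unsigned int timeout_us;

	/* Same arithmetic as the kernel: clks * 1000 / (Hz / 1000) == us */
	timeout_us = (timeout_clks * 1000) / (clock_hz / 1000);
	printf("%u clocks at %u Hz -> %u us\n",
	       timeout_clks, clock_hz, timeout_us);	/* prints 500 us */
	return 0;
}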
@@ -2423,9 +2430,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
2423{ 2430{
2424 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 2431 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2425 return; 2432 return;
2426 mmc_host_clk_hold(host);
2427 host->ops->hw_reset(host); 2433 host->ops->hw_reset(host);
2428 mmc_host_clk_release(host);
2429} 2434}
2430 2435
2431int mmc_hw_reset(struct mmc_host *host) 2436int mmc_hw_reset(struct mmc_host *host)
@@ -2633,10 +2638,14 @@ void mmc_start_host(struct mmc_host *host)
2633 host->f_init = max(freqs[0], host->f_min); 2638 host->f_init = max(freqs[0], host->f_min);
2634 host->rescan_disable = 0; 2639 host->rescan_disable = 0;
2635 host->ios.power_mode = MMC_POWER_UNDEFINED; 2640 host->ios.power_mode = MMC_POWER_UNDEFINED;
2641
2642 mmc_claim_host(host);
2636 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) 2643 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2637 mmc_power_off(host); 2644 mmc_power_off(host);
2638 else 2645 else
2639 mmc_power_up(host, host->ocr_avail); 2646 mmc_power_up(host, host->ocr_avail);
2647 mmc_release_host(host);
2648
2640 mmc_gpiod_request_cd_irq(host); 2649 mmc_gpiod_request_cd_irq(host);
2641 _mmc_detect_change(host, 0, false); 2650 _mmc_detect_change(host, 0, false);
2642} 2651}
@@ -2674,7 +2683,9 @@ void mmc_stop_host(struct mmc_host *host)
2674 2683
2675 BUG_ON(host->card); 2684 BUG_ON(host->card);
2676 2685
2686 mmc_claim_host(host);
2677 mmc_power_off(host); 2687 mmc_power_off(host);
2688 mmc_release_host(host);
2678} 2689}
2679 2690
2680int mmc_power_save_host(struct mmc_host *host) 2691int mmc_power_save_host(struct mmc_host *host)
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 1a22a82209b2..09241e56d628 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -40,9 +40,6 @@ void mmc_init_erase(struct mmc_card *card);
40 40
41void mmc_set_chip_select(struct mmc_host *host, int mode); 41void mmc_set_chip_select(struct mmc_host *host, int mode);
42void mmc_set_clock(struct mmc_host *host, unsigned int hz); 42void mmc_set_clock(struct mmc_host *host, unsigned int hz);
43void mmc_gate_clock(struct mmc_host *host);
44void mmc_ungate_clock(struct mmc_host *host);
45void mmc_set_ungated(struct mmc_host *host);
46void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); 43void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
47void mmc_set_bus_width(struct mmc_host *host, unsigned int width); 44void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
48u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); 45u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index e9142108a6c6..154aced0b91b 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -126,6 +126,12 @@ static int mmc_ios_show(struct seq_file *s, void *data)
126 case MMC_TIMING_SD_HS: 126 case MMC_TIMING_SD_HS:
127 str = "sd high-speed"; 127 str = "sd high-speed";
128 break; 128 break;
129 case MMC_TIMING_UHS_SDR12:
130 str = "sd uhs SDR12";
131 break;
132 case MMC_TIMING_UHS_SDR25:
133 str = "sd uhs SDR25";
134 break;
129 case MMC_TIMING_UHS_SDR50: 135 case MMC_TIMING_UHS_SDR50:
130 str = "sd uhs SDR50"; 136 str = "sd uhs SDR50";
131 break; 137 break;
@@ -166,6 +172,25 @@ static int mmc_ios_show(struct seq_file *s, void *data)
166 } 172 }
167 seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str); 173 seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str);
168 174
175 switch (ios->drv_type) {
176 case MMC_SET_DRIVER_TYPE_A:
177 str = "driver type A";
178 break;
179 case MMC_SET_DRIVER_TYPE_B:
180 str = "driver type B";
181 break;
182 case MMC_SET_DRIVER_TYPE_C:
183 str = "driver type C";
184 break;
185 case MMC_SET_DRIVER_TYPE_D:
186 str = "driver type D";
187 break;
188 default:
189 str = "invalid";
190 break;
191 }
192 seq_printf(s, "driver type:\t%u (%s)\n", ios->drv_type, str);
193
169 return 0; 194 return 0;
170} 195}
171 196
@@ -230,11 +255,6 @@ void mmc_add_host_debugfs(struct mmc_host *host)
230 &mmc_clock_fops)) 255 &mmc_clock_fops))
231 goto err_node; 256 goto err_node;
232 257
233#ifdef CONFIG_MMC_CLKGATE
234 if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
235 root, &host->clk_delay))
236 goto err_node;
237#endif
238#ifdef CONFIG_FAIL_MMC_REQUEST 258#ifdef CONFIG_FAIL_MMC_REQUEST
239 if (fail_request) 259 if (fail_request)
240 setup_fault_attr(&fail_default_attr, fail_request); 260 setup_fault_attr(&fail_default_attr, fail_request);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 5466f25f0281..da950c44204d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -61,246 +61,6 @@ void mmc_unregister_host_class(void)
61 class_unregister(&mmc_host_class); 61 class_unregister(&mmc_host_class);
62} 62}
63 63
64#ifdef CONFIG_MMC_CLKGATE
65static ssize_t clkgate_delay_show(struct device *dev,
66 struct device_attribute *attr, char *buf)
67{
68 struct mmc_host *host = cls_dev_to_mmc_host(dev);
69 return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
70}
71
72static ssize_t clkgate_delay_store(struct device *dev,
73 struct device_attribute *attr, const char *buf, size_t count)
74{
75 struct mmc_host *host = cls_dev_to_mmc_host(dev);
76 unsigned long flags, value;
77
78 if (kstrtoul(buf, 0, &value))
79 return -EINVAL;
80
81 spin_lock_irqsave(&host->clk_lock, flags);
82 host->clkgate_delay = value;
83 spin_unlock_irqrestore(&host->clk_lock, flags);
84 return count;
85}
86
87/*
88 * Enabling clock gating will make the core call out to the host
89 * once up and once down when it performs a request or card operation
90 * intermingled in any fashion. The driver will see this through
91 * set_ios() operations with ios.clock field set to 0 to gate (disable)
92 * the block clock, and to the old frequency to enable it again.
93 */
94static void mmc_host_clk_gate_delayed(struct mmc_host *host)
95{
96 unsigned long tick_ns;
97 unsigned long freq = host->ios.clock;
98 unsigned long flags;
99
100 if (!freq) {
101 pr_debug("%s: frequency set to 0 in disable function, "
102 "this means the clock is already disabled.\n",
103 mmc_hostname(host));
104 return;
105 }
106 /*
107 * New requests may have appeared while we were scheduling,
108 * then there is no reason to delay the check before
109 * clk_disable().
110 */
111 spin_lock_irqsave(&host->clk_lock, flags);
112
113 /*
114 * Delay n bus cycles (at least 8 from MMC spec) before attempting
115 * to disable the MCI block clock. The reference count may have
116 * gone up again after this delay due to rescheduling!
117 */
118 if (!host->clk_requests) {
119 spin_unlock_irqrestore(&host->clk_lock, flags);
120 tick_ns = DIV_ROUND_UP(1000000000, freq);
121 ndelay(host->clk_delay * tick_ns);
122 } else {
123 /* New users appeared while waiting for this work */
124 spin_unlock_irqrestore(&host->clk_lock, flags);
125 return;
126 }
127 mutex_lock(&host->clk_gate_mutex);
128 spin_lock_irqsave(&host->clk_lock, flags);
129 if (!host->clk_requests) {
130 spin_unlock_irqrestore(&host->clk_lock, flags);
131 /* This will set host->ios.clock to 0 */
132 mmc_gate_clock(host);
133 spin_lock_irqsave(&host->clk_lock, flags);
134 pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
135 }
136 spin_unlock_irqrestore(&host->clk_lock, flags);
137 mutex_unlock(&host->clk_gate_mutex);
138}
139
140/*
141 * Internal work. Work to disable the clock at some later point.
142 */
143static void mmc_host_clk_gate_work(struct work_struct *work)
144{
145 struct mmc_host *host = container_of(work, struct mmc_host,
146 clk_gate_work.work);
147
148 mmc_host_clk_gate_delayed(host);
149}
150
151/**
152 * mmc_host_clk_hold - ungate hardware MCI clocks
153 * @host: host to ungate.
154 *
155 * Makes sure the host ios.clock is restored to a non-zero value
156 * past this call. Increase clock reference count and ungate clock
157 * if we're the first user.
158 */
159void mmc_host_clk_hold(struct mmc_host *host)
160{
161 unsigned long flags;
162
163 /* cancel any clock gating work scheduled by mmc_host_clk_release() */
164 cancel_delayed_work_sync(&host->clk_gate_work);
165 mutex_lock(&host->clk_gate_mutex);
166 spin_lock_irqsave(&host->clk_lock, flags);
167 if (host->clk_gated) {
168 spin_unlock_irqrestore(&host->clk_lock, flags);
169 mmc_ungate_clock(host);
170 spin_lock_irqsave(&host->clk_lock, flags);
171 pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
172 }
173 host->clk_requests++;
174 spin_unlock_irqrestore(&host->clk_lock, flags);
175 mutex_unlock(&host->clk_gate_mutex);
176}
177
178/**
179 * mmc_host_may_gate_card - check if this card may be gated
180 * @card: card to check.
181 */
182static bool mmc_host_may_gate_card(struct mmc_card *card)
183{
184 /* If there is no card we may gate it */
185 if (!card)
186 return true;
187 /*
188 * Don't gate SDIO cards! These need to be clocked at all times
189 * since they may be independent systems generating interrupts
190 * and other events. The clock requests counter from the core will
191 * go down to zero since the core does not need it, but we will not
192 * gate the clock, because there is somebody out there that may still
193 * be using it.
194 */
195 return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
196}
197
198/**
199 * mmc_host_clk_release - gate off hardware MCI clocks
200 * @host: host to gate.
201 *
202 * Calls the host driver with ios.clock set to zero as often as possible
203 * in order to gate off hardware MCI clocks. Decrease clock reference
204 * count and schedule disabling of clock.
205 */
206void mmc_host_clk_release(struct mmc_host *host)
207{
208 unsigned long flags;
209
210 spin_lock_irqsave(&host->clk_lock, flags);
211 host->clk_requests--;
212 if (mmc_host_may_gate_card(host->card) &&
213 !host->clk_requests)
214 schedule_delayed_work(&host->clk_gate_work,
215 msecs_to_jiffies(host->clkgate_delay));
216 spin_unlock_irqrestore(&host->clk_lock, flags);
217}
218
219/**
220 * mmc_host_clk_rate - get current clock frequency setting
221 * @host: host to get the clock frequency for.
222 *
223 * Returns current clock frequency regardless of gating.
224 */
225unsigned int mmc_host_clk_rate(struct mmc_host *host)
226{
227 unsigned long freq;
228 unsigned long flags;
229
230 spin_lock_irqsave(&host->clk_lock, flags);
231 if (host->clk_gated)
232 freq = host->clk_old;
233 else
234 freq = host->ios.clock;
235 spin_unlock_irqrestore(&host->clk_lock, flags);
236 return freq;
237}
238
239/**
240 * mmc_host_clk_init - set up clock gating code
241 * @host: host with potential clock to control
242 */
243static inline void mmc_host_clk_init(struct mmc_host *host)
244{
245 host->clk_requests = 0;
246 /* Hold MCI clock for 8 cycles by default */
247 host->clk_delay = 8;
248 /*
249 * Default clock gating delay is 0ms to avoid wasting power.
250 * This value can be tuned by writing into sysfs entry.
251 */
252 host->clkgate_delay = 0;
253 host->clk_gated = false;
254 INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
255 spin_lock_init(&host->clk_lock);
256 mutex_init(&host->clk_gate_mutex);
257}
258
259/**
260 * mmc_host_clk_exit - shut down clock gating code
261 * @host: host with potential clock to control
262 */
263static inline void mmc_host_clk_exit(struct mmc_host *host)
264{
265 /*
266 * Wait for any outstanding gate and then make sure we're
267 * ungated before exiting.
268 */
269 if (cancel_delayed_work_sync(&host->clk_gate_work))
270 mmc_host_clk_gate_delayed(host);
271 if (host->clk_gated)
272 mmc_host_clk_hold(host);
273 /* There should be only one user now */
274 WARN_ON(host->clk_requests > 1);
275}
276
277static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
278{
279 host->clkgate_delay_attr.show = clkgate_delay_show;
280 host->clkgate_delay_attr.store = clkgate_delay_store;
281 sysfs_attr_init(&host->clkgate_delay_attr.attr);
282 host->clkgate_delay_attr.attr.name = "clkgate_delay";
283 host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
284 if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
285 pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
286 mmc_hostname(host));
287}
288#else
289
290static inline void mmc_host_clk_init(struct mmc_host *host)
291{
292}
293
294static inline void mmc_host_clk_exit(struct mmc_host *host)
295{
296}
297
298static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
299{
300}
301
302#endif
303
304void mmc_retune_enable(struct mmc_host *host) 64void mmc_retune_enable(struct mmc_host *host)
305{ 65{
306 host->can_retune = 1; 66 host->can_retune = 1;
@@ -507,6 +267,8 @@ int mmc_of_parse(struct mmc_host *host)
507 host->caps |= MMC_CAP_UHS_DDR50; 267 host->caps |= MMC_CAP_UHS_DDR50;
508 if (of_property_read_bool(np, "cap-power-off-card")) 268 if (of_property_read_bool(np, "cap-power-off-card"))
509 host->caps |= MMC_CAP_POWER_OFF_CARD; 269 host->caps |= MMC_CAP_POWER_OFF_CARD;
270 if (of_property_read_bool(np, "cap-mmc-hw-reset"))
271 host->caps |= MMC_CAP_HW_RESET;
510 if (of_property_read_bool(np, "cap-sdio-irq")) 272 if (of_property_read_bool(np, "cap-sdio-irq"))
511 host->caps |= MMC_CAP_SDIO_IRQ; 273 host->caps |= MMC_CAP_SDIO_IRQ;
512 if (of_property_read_bool(np, "full-pwr-cycle")) 274 if (of_property_read_bool(np, "full-pwr-cycle"))
@@ -583,8 +345,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
583 return NULL; 345 return NULL;
584 } 346 }
585 347
586 mmc_host_clk_init(host);
587
588 spin_lock_init(&host->lock); 348 spin_lock_init(&host->lock);
589 init_waitqueue_head(&host->wq); 349 init_waitqueue_head(&host->wq);
590 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 350 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -633,7 +393,6 @@ int mmc_add_host(struct mmc_host *host)
633#ifdef CONFIG_DEBUG_FS 393#ifdef CONFIG_DEBUG_FS
634 mmc_add_host_debugfs(host); 394 mmc_add_host_debugfs(host);
635#endif 395#endif
636 mmc_host_clk_sysfs_init(host);
637 396
638 mmc_start_host(host); 397 mmc_start_host(host);
639 register_pm_notifier(&host->pm_notify); 398 register_pm_notifier(&host->pm_notify);
@@ -663,8 +422,6 @@ void mmc_remove_host(struct mmc_host *host)
663 device_del(&host->class_dev); 422 device_del(&host->class_dev);
664 423
665 led_trigger_unregister_simple(host->led); 424 led_trigger_unregister_simple(host->led);
666
667 mmc_host_clk_exit(host);
668} 425}
669 426
670EXPORT_SYMBOL(mmc_remove_host); 427EXPORT_SYMBOL(mmc_remove_host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e726903170a8..c793fda27321 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1924,7 +1924,6 @@ EXPORT_SYMBOL(mmc_can_reset);
1924static int mmc_reset(struct mmc_host *host) 1924static int mmc_reset(struct mmc_host *host)
1925{ 1925{
1926 struct mmc_card *card = host->card; 1926 struct mmc_card *card = host->card;
1927 u32 status;
1928 1927
1929 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1928 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1930 return -EOPNOTSUPP; 1929 return -EOPNOTSUPP;
@@ -1932,20 +1931,12 @@ static int mmc_reset(struct mmc_host *host)
1932 if (!mmc_can_reset(card)) 1931 if (!mmc_can_reset(card))
1933 return -EOPNOTSUPP; 1932 return -EOPNOTSUPP;
1934 1933
1935 mmc_host_clk_hold(host);
1936 mmc_set_clock(host, host->f_init); 1934 mmc_set_clock(host, host->f_init);
1937 1935
1938 host->ops->hw_reset(host); 1936 host->ops->hw_reset(host);
1939 1937
1940 /* If the reset has happened, then a status command will fail */
1941 if (!mmc_send_status(card, &status)) {
1942 mmc_host_clk_release(host);
1943 return -ENOSYS;
1944 }
1945
1946 /* Set initial state and call mmc_set_ios */ 1938 /* Set initial state and call mmc_set_ios */
1947 mmc_set_initial_state(host); 1939 mmc_set_initial_state(host);
1948 mmc_host_clk_release(host);
1949 1940
1950 return mmc_init_card(host, card->ocr, card); 1941 return mmc_init_card(host, card->ocr, card);
1951} 1942}
@@ -2013,14 +2004,13 @@ int mmc_attach_mmc(struct mmc_host *host)
2013 2004
2014 mmc_release_host(host); 2005 mmc_release_host(host);
2015 err = mmc_add_card(host->card); 2006 err = mmc_add_card(host->card);
2016 mmc_claim_host(host);
2017 if (err) 2007 if (err)
2018 goto remove_card; 2008 goto remove_card;
2019 2009
2010 mmc_claim_host(host);
2020 return 0; 2011 return 0;
2021 2012
2022remove_card: 2013remove_card:
2023 mmc_release_host(host);
2024 mmc_remove_card(host->card); 2014 mmc_remove_card(host->card);
2025 mmc_claim_host(host); 2015 mmc_claim_host(host);
2026 host->card = NULL; 2016 host->card = NULL;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 0e9ae1c276c8..1f444269ebbe 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -579,7 +579,6 @@ out:
579 579
580 return err; 580 return err;
581} 581}
582EXPORT_SYMBOL_GPL(__mmc_switch);
583 582
584int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, 583int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
585 unsigned int timeout_ms) 584 unsigned int timeout_ms)
@@ -589,7 +588,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
589} 588}
590EXPORT_SYMBOL_GPL(mmc_switch); 589EXPORT_SYMBOL_GPL(mmc_switch);
591 590
592int mmc_send_tuning(struct mmc_host *host) 591int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
593{ 592{
594 struct mmc_request mrq = {NULL}; 593 struct mmc_request mrq = {NULL};
595 struct mmc_command cmd = {0}; 594 struct mmc_command cmd = {0};
@@ -599,16 +598,13 @@ int mmc_send_tuning(struct mmc_host *host)
599 const u8 *tuning_block_pattern; 598 const u8 *tuning_block_pattern;
600 int size, err = 0; 599 int size, err = 0;
601 u8 *data_buf; 600 u8 *data_buf;
602 u32 opcode;
603 601
604 if (ios->bus_width == MMC_BUS_WIDTH_8) { 602 if (ios->bus_width == MMC_BUS_WIDTH_8) {
605 tuning_block_pattern = tuning_blk_pattern_8bit; 603 tuning_block_pattern = tuning_blk_pattern_8bit;
606 size = sizeof(tuning_blk_pattern_8bit); 604 size = sizeof(tuning_blk_pattern_8bit);
607 opcode = MMC_SEND_TUNING_BLOCK_HS200;
608 } else if (ios->bus_width == MMC_BUS_WIDTH_4) { 605 } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
609 tuning_block_pattern = tuning_blk_pattern_4bit; 606 tuning_block_pattern = tuning_blk_pattern_4bit;
610 size = sizeof(tuning_blk_pattern_4bit); 607 size = sizeof(tuning_blk_pattern_4bit);
611 opcode = MMC_SEND_TUNING_BLOCK;
612 } else 608 } else
613 return -EINVAL; 609 return -EINVAL;
614 610
@@ -639,6 +635,9 @@ int mmc_send_tuning(struct mmc_host *host)
639 635
640 mmc_wait_for_req(host, &mrq); 636 mmc_wait_for_req(host, &mrq);
641 637
638 if (cmd_error)
639 *cmd_error = cmd.error;
640
642 if (cmd.error) { 641 if (cmd.error) {
643 err = cmd.error; 642 err = cmd.error;
644 goto out; 643 goto out;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index f498f9ae21f0..f1b8e81aaa28 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -28,6 +28,9 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width);
28int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status); 28int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
29int mmc_can_ext_csd(struct mmc_card *card); 29int mmc_can_ext_csd(struct mmc_card *card);
30int mmc_switch_status_error(struct mmc_host *host, u32 status); 30int mmc_switch_status_error(struct mmc_host *host, u32 status);
31int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
32 unsigned int timeout_ms, bool use_busy_signal, bool send_status,
33 bool ignore_crc);
31 34
32#endif 35#endif
33 36
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 9d6d2fb21796..ad4f94ec7e8d 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -76,7 +76,7 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
76 if (!pwrseq) 76 if (!pwrseq)
77 return ERR_PTR(-ENOMEM); 77 return ERR_PTR(-ENOMEM);
78 78
79 pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW); 79 pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
80 if (IS_ERR(pwrseq->reset_gpio)) { 80 if (IS_ERR(pwrseq->reset_gpio)) {
81 ret = PTR_ERR(pwrseq->reset_gpio); 81 ret = PTR_ERR(pwrseq->reset_gpio);
82 goto free; 82 goto free;
@@ -84,11 +84,11 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
84 84
85 /* 85 /*
86 * register reset handler to ensure emmc reset also from 86 * register reset handler to ensure emmc reset also from
87 * emergency_reboot(), priority 129 schedules it just before 87 * emergency_reboot(), priority 255 is the highest priority
88 * system reboot 88 * so it will be executed before any system reboot handler.
89 */ 89 */
90 pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb; 90 pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
91 pwrseq->reset_nb.priority = 129; 91 pwrseq->reset_nb.priority = 255;
92 register_restart_handler(&pwrseq->reset_nb); 92 register_restart_handler(&pwrseq->reset_nb);
93 93
94 pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops; 94 pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 0b14b83a53d6..d10538bb5e07 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -23,18 +23,21 @@ struct mmc_pwrseq_simple {
23 struct mmc_pwrseq pwrseq; 23 struct mmc_pwrseq pwrseq;
24 bool clk_enabled; 24 bool clk_enabled;
25 struct clk *ext_clk; 25 struct clk *ext_clk;
26 int nr_gpios; 26 struct gpio_descs *reset_gpios;
27 struct gpio_desc *reset_gpios[0];
28}; 27};
29 28
30static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq, 29static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
31 int value) 30 int value)
32{ 31{
33 int i; 32 int i;
33 struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
34 int values[reset_gpios->ndescs];
34 35
35 for (i = 0; i < pwrseq->nr_gpios; i++) 36 for (i = 0; i < reset_gpios->ndescs; i++)
36 if (!IS_ERR(pwrseq->reset_gpios[i])) 37 values[i] = value;
37 gpiod_set_value_cansleep(pwrseq->reset_gpios[i], value); 38
39 gpiod_set_array_value_cansleep(reset_gpios->ndescs, reset_gpios->desc,
40 values);
38} 41}
39 42
40static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host) 43static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
@@ -75,11 +78,8 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
75{ 78{
76 struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq, 79 struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
77 struct mmc_pwrseq_simple, pwrseq); 80 struct mmc_pwrseq_simple, pwrseq);
78 int i;
79 81
80 for (i = 0; i < pwrseq->nr_gpios; i++) 82 gpiod_put_array(pwrseq->reset_gpios);
81 if (!IS_ERR(pwrseq->reset_gpios[i]))
82 gpiod_put(pwrseq->reset_gpios[i]);
83 83
84 if (!IS_ERR(pwrseq->ext_clk)) 84 if (!IS_ERR(pwrseq->ext_clk))
85 clk_put(pwrseq->ext_clk); 85 clk_put(pwrseq->ext_clk);
@@ -98,14 +98,9 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
98 struct device *dev) 98 struct device *dev)
99{ 99{
100 struct mmc_pwrseq_simple *pwrseq; 100 struct mmc_pwrseq_simple *pwrseq;
101 int i, nr_gpios, ret = 0; 101 int ret = 0;
102
103 nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
104 if (nr_gpios < 0)
105 nr_gpios = 0;
106 102
107 pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios * 103 pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
108 sizeof(struct gpio_desc *), GFP_KERNEL);
109 if (!pwrseq) 104 if (!pwrseq)
110 return ERR_PTR(-ENOMEM); 105 return ERR_PTR(-ENOMEM);
111 106
@@ -116,22 +111,12 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
116 goto free; 111 goto free;
117 } 112 }
118 113
119 for (i = 0; i < nr_gpios; i++) { 114 pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
120 pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i, 115 if (IS_ERR(pwrseq->reset_gpios)) {
121 GPIOD_OUT_HIGH); 116 ret = PTR_ERR(pwrseq->reset_gpios);
122 if (IS_ERR(pwrseq->reset_gpios[i]) && 117 goto clk_put;
123 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOENT &&
124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
125 ret = PTR_ERR(pwrseq->reset_gpios[i]);
126
127 while (i--)
128 gpiod_put(pwrseq->reset_gpios[i]);
129
130 goto clk_put;
131 }
132 } 118 }
133 119
134 pwrseq->nr_gpios = nr_gpios;
135 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops; 120 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
136 121
137 return &pwrseq->pwrseq; 122 return &pwrseq->pwrseq;
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index dd1d1e0fe322..fad660b95809 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -35,25 +35,7 @@
35#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 35#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
36#endif 36#endif
37 37
38/*
39 * This hook just adds a quirk for all sdio devices
40 */
41static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
42{
43 if (mmc_card_sdio(card))
44 card->quirks |= data;
45}
46
47static const struct mmc_fixup mmc_fixup_methods[] = { 38static const struct mmc_fixup mmc_fixup_methods[] = {
48 /* by default sdio devices are considered CLK_GATING broken */
49 /* good cards will be whitelisted as they are tested */
50 SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
51 add_quirk_for_sdio_devices,
52 MMC_QUIRK_BROKEN_CLK_GATING),
53
54 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
55 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
56
57 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, 39 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
58 add_quirk, MMC_QUIRK_NONSTD_FUNC_IF), 40 add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
59 41
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4e7366ab187f..141eaa923e18 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -357,8 +357,6 @@ int mmc_sd_switch_hs(struct mmc_card *card)
357 if (card->sw_caps.hs_max_dtr == 0) 357 if (card->sw_caps.hs_max_dtr == 0)
358 return 0; 358 return 0;
359 359
360 err = -EIO;
361
362 status = kmalloc(64, GFP_KERNEL); 360 status = kmalloc(64, GFP_KERNEL);
363 if (!status) { 361 if (!status) {
364 pr_err("%s: could not allocate a buffer for " 362 pr_err("%s: could not allocate a buffer for "
@@ -628,9 +626,25 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
628 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. 626 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
629 */ 627 */
630 if (!mmc_host_is_spi(card->host) && 628 if (!mmc_host_is_spi(card->host) &&
631 (card->sd_bus_speed == UHS_SDR50_BUS_SPEED || 629 (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
632 card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) 630 card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
631 card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
633 err = mmc_execute_tuning(card); 632 err = mmc_execute_tuning(card);
633
634 /*
635 * As SD Specifications Part1 Physical Layer Specification
636 * Version 3.01 says, CMD19 tuning is available for unlocked
637 * cards in transfer state of 1.8V signaling mode. The small
638 * difference between v3.00 and 3.01 spec means that CMD19
639 * tuning is also available for DDR50 mode.
640 */
641 if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
642 pr_warn("%s: ddr50 tuning failed\n",
643 mmc_hostname(card->host));
644 err = 0;
645 }
646 }
647
634out: 648out:
635 kfree(status); 649 kfree(status);
636 650
@@ -786,9 +800,7 @@ static int mmc_sd_get_ro(struct mmc_host *host)
786 if (!host->ops->get_ro) 800 if (!host->ops->get_ro)
787 return -1; 801 return -1;
788 802
789 mmc_host_clk_hold(host);
790 ro = host->ops->get_ro(host); 803 ro = host->ops->get_ro(host);
791 mmc_host_clk_release(host);
792 804
793 return ro; 805 return ro;
794} 806}
@@ -1231,14 +1243,13 @@ int mmc_attach_sd(struct mmc_host *host)
1231 1243
1232 mmc_release_host(host); 1244 mmc_release_host(host);
1233 err = mmc_add_card(host->card); 1245 err = mmc_add_card(host->card);
1234 mmc_claim_host(host);
1235 if (err) 1246 if (err)
1236 goto remove_card; 1247 goto remove_card;
1237 1248
1249 mmc_claim_host(host);
1238 return 0; 1250 return 0;
1239 1251
1240remove_card: 1252remove_card:
1241 mmc_release_host(host);
1242 mmc_remove_card(host->card); 1253 mmc_remove_card(host->card);
1243 host->card = NULL; 1254 host->card = NULL;
1244 mmc_claim_host(host); 1255 mmc_claim_host(host);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index b91abedcfdca..16d838e6d623 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -897,11 +897,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
897 */ 897 */
898static int mmc_sdio_suspend(struct mmc_host *host) 898static int mmc_sdio_suspend(struct mmc_host *host)
899{ 899{
900 if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { 900 mmc_claim_host(host);
901 mmc_claim_host(host); 901
902 if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
902 sdio_disable_wide(host->card); 903 sdio_disable_wide(host->card);
903 mmc_release_host(host);
904 }
905 904
906 if (!mmc_card_keep_power(host)) { 905 if (!mmc_card_keep_power(host)) {
907 mmc_power_off(host); 906 mmc_power_off(host);
@@ -910,6 +909,8 @@ static int mmc_sdio_suspend(struct mmc_host *host)
910 mmc_retune_needed(host); 909 mmc_retune_needed(host);
911 } 910 }
912 911
912 mmc_release_host(host);
913
913 return 0; 914 return 0;
914} 915}
915 916
@@ -955,13 +956,10 @@ static int mmc_sdio_resume(struct mmc_host *host)
955 } 956 }
956 957
957 if (!err && host->sdio_irqs) { 958 if (!err && host->sdio_irqs) {
958 if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { 959 if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
959 wake_up_process(host->sdio_irq_thread); 960 wake_up_process(host->sdio_irq_thread);
960 } else if (host->caps & MMC_CAP_SDIO_IRQ) { 961 else if (host->caps & MMC_CAP_SDIO_IRQ)
961 mmc_host_clk_hold(host);
962 host->ops->enable_sdio_irq(host, 1); 962 host->ops->enable_sdio_irq(host, 1);
963 mmc_host_clk_release(host);
964 }
965 } 963 }
966 964
967 mmc_release_host(host); 965 mmc_release_host(host);
@@ -1018,15 +1016,24 @@ out:
1018static int mmc_sdio_runtime_suspend(struct mmc_host *host) 1016static int mmc_sdio_runtime_suspend(struct mmc_host *host)
1019{ 1017{
1020 /* No references to the card, cut the power to it. */ 1018 /* No references to the card, cut the power to it. */
1019 mmc_claim_host(host);
1021 mmc_power_off(host); 1020 mmc_power_off(host);
1021 mmc_release_host(host);
1022
1022 return 0; 1023 return 0;
1023} 1024}
1024 1025
1025static int mmc_sdio_runtime_resume(struct mmc_host *host) 1026static int mmc_sdio_runtime_resume(struct mmc_host *host)
1026{ 1027{
1028 int ret;
1029
1027 /* Restore power and re-initialize. */ 1030 /* Restore power and re-initialize. */
1031 mmc_claim_host(host);
1028 mmc_power_up(host, host->card->ocr); 1032 mmc_power_up(host, host->card->ocr);
1029 return mmc_sdio_power_restore(host); 1033 ret = mmc_sdio_power_restore(host);
1034 mmc_release_host(host);
1035
1036 return ret;
1030} 1037}
1031 1038
1032static int mmc_sdio_reset(struct mmc_host *host) 1039static int mmc_sdio_reset(struct mmc_host *host)
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 09cc67d028f0..91bbbfb29f3f 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -168,21 +168,15 @@ static int sdio_irq_thread(void *_host)
168 } 168 }
169 169
170 set_current_state(TASK_INTERRUPTIBLE); 170 set_current_state(TASK_INTERRUPTIBLE);
171 if (host->caps & MMC_CAP_SDIO_IRQ) { 171 if (host->caps & MMC_CAP_SDIO_IRQ)
172 mmc_host_clk_hold(host);
173 host->ops->enable_sdio_irq(host, 1); 172 host->ops->enable_sdio_irq(host, 1);
174 mmc_host_clk_release(host);
175 }
176 if (!kthread_should_stop()) 173 if (!kthread_should_stop())
177 schedule_timeout(period); 174 schedule_timeout(period);
178 set_current_state(TASK_RUNNING); 175 set_current_state(TASK_RUNNING);
179 } while (!kthread_should_stop()); 176 } while (!kthread_should_stop());
180 177
181 if (host->caps & MMC_CAP_SDIO_IRQ) { 178 if (host->caps & MMC_CAP_SDIO_IRQ)
182 mmc_host_clk_hold(host);
183 host->ops->enable_sdio_irq(host, 0); 179 host->ops->enable_sdio_irq(host, 0);
184 mmc_host_clk_release(host);
185 }
186 180
187 pr_debug("%s: IRQ thread exiting with code %d\n", 181 pr_debug("%s: IRQ thread exiting with code %d\n",
188 mmc_hostname(host), ret); 182 mmc_hostname(host), ret);
@@ -208,9 +202,7 @@ static int sdio_card_irq_get(struct mmc_card *card)
208 return err; 202 return err;
209 } 203 }
210 } else if (host->caps & MMC_CAP_SDIO_IRQ) { 204 } else if (host->caps & MMC_CAP_SDIO_IRQ) {
211 mmc_host_clk_hold(host);
212 host->ops->enable_sdio_irq(host, 1); 205 host->ops->enable_sdio_irq(host, 1);
213 mmc_host_clk_release(host);
214 } 206 }
215 } 207 }
216 208
@@ -229,9 +221,7 @@ static int sdio_card_irq_put(struct mmc_card *card)
229 atomic_set(&host->sdio_irq_thread_abort, 1); 221 atomic_set(&host->sdio_irq_thread_abort, 1);
230 kthread_stop(host->sdio_irq_thread); 222 kthread_stop(host->sdio_irq_thread);
231 } else if (host->caps & MMC_CAP_SDIO_IRQ) { 223 } else if (host->caps & MMC_CAP_SDIO_IRQ) {
232 mmc_host_clk_hold(host);
233 host->ops->enable_sdio_irq(host, 0); 224 host->ops->enable_sdio_irq(host, 0);
234 mmc_host_clk_release(host);
235 } 225 }
236 } 226 }
237 227
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index 12a4d3ab174c..5660c7f459e9 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -12,6 +12,8 @@
12#ifndef _MMC_SDIO_OPS_H 12#ifndef _MMC_SDIO_OPS_H
13#define _MMC_SDIO_OPS_H 13#define _MMC_SDIO_OPS_H
14 14
15#include <linux/mmc/sdio.h>
16
15int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); 17int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
16int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn, 18int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
17 unsigned addr, u8 in, u8* out); 19 unsigned addr, u8 in, u8* out);
@@ -19,5 +21,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
19 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz); 21 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
20int sdio_reset(struct mmc_host *host); 22int sdio_reset(struct mmc_host *host);
21 23
24static inline bool mmc_is_io_op(u32 opcode)
25{
26 return opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED;
27}
28
22#endif 29#endif
23 30
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8a1e3498261e..af71de5fda3b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -67,7 +67,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
67 has the effect of scrambling the addresses and formats of data 67 has the effect of scrambling the addresses and formats of data
68 accessed in sizes other than the datum size. 68 accessed in sizes other than the datum size.
69 69
70 This is the case for the Freescale eSDHC and Nintendo Wii SDHCI. 70 This is the case for the Nintendo Wii SDHCI.
71 71
72config MMC_SDHCI_PCI 72config MMC_SDHCI_PCI
73 tristate "SDHCI support on PCI bus" 73 tristate "SDHCI support on PCI bus"
@@ -140,8 +140,8 @@ config MMC_SDHCI_OF_AT91
140config MMC_SDHCI_OF_ESDHC 140config MMC_SDHCI_OF_ESDHC
141 tristate "SDHCI OF support for the Freescale eSDHC controller" 141 tristate "SDHCI OF support for the Freescale eSDHC controller"
142 depends on MMC_SDHCI_PLTFM 142 depends on MMC_SDHCI_PLTFM
143 depends on PPC 143 depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
144 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER 144 select MMC_SDHCI_IO_ACCESSORS
145 help 145 help
146 This selects the Freescale eSDHC controller support. 146 This selects the Freescale eSDHC controller support.
147 147
@@ -366,7 +366,7 @@ config MMC_OMAP
366config MMC_OMAP_HS 366config MMC_OMAP_HS
367 tristate "TI OMAP High Speed Multimedia Card Interface support" 367 tristate "TI OMAP High Speed Multimedia Card Interface support"
368 depends on HAS_DMA 368 depends on HAS_DMA
369 depends on ARCH_OMAP2PLUS || COMPILE_TEST 369 depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
370 help 370 help
371 This selects the TI OMAP High Speed Multimedia card Interface. 371 This selects the TI OMAP High Speed Multimedia card Interface.
372 If you have an omap2plus board with a Multimedia Card slot, 372 If you have an omap2plus board with a Multimedia Card slot,
@@ -473,7 +473,7 @@ config MMC_DAVINCI
473 473
474config MMC_GOLDFISH 474config MMC_GOLDFISH
475 tristate "goldfish qemu Multimedia Card Interface support" 475 tristate "goldfish qemu Multimedia Card Interface support"
476 depends on GOLDFISH 476 depends on GOLDFISH || COMPILE_TEST
477 help 477 help
478 This selects the Goldfish Multimedia card Interface emulation 478 This selects the Goldfish Multimedia card Interface emulation
479 found on the Goldfish Android virtual device emulation. 479 found on the Goldfish Android virtual device emulation.
@@ -615,15 +615,7 @@ config MMC_DW
615 help 615 help
616 This selects support for the Synopsys DesignWare Mobile Storage IP 616 This selects support for the Synopsys DesignWare Mobile Storage IP
617 block, this provides host support for SD and MMC interfaces, in both 617 block, this provides host support for SD and MMC interfaces, in both
618 PIO and external DMA modes. 618 PIO, internal DMA mode and external DMA mode.
619
620config MMC_DW_IDMAC
621 bool "Internal DMAC interface"
622 depends on MMC_DW
623 help
624 This selects support for the internal DMAC block within the Synopsys
625 Designware Mobile Storage IP block. This disables the external DMA
626 interface.
627 619
628config MMC_DW_PLTFM 620config MMC_DW_PLTFM
629 tristate "Synopsys Designware MCI Support as platform device" 621 tristate "Synopsys Designware MCI Support as platform device"
@@ -652,7 +644,6 @@ config MMC_DW_K3
652 tristate "K3 specific extensions for Synopsys DW Memory Card Interface" 644 tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
653 depends on MMC_DW 645 depends on MMC_DW
654 select MMC_DW_PLTFM 646 select MMC_DW_PLTFM
655 select MMC_DW_IDMAC
656 help 647 help
657 This selects support for Hisilicon K3 SoC specific extensions to the 648 This selects support for Hisilicon K3 SoC specific extensions to the
658 Synopsys DesignWare Memory Card Interface driver. Select this option 649 Synopsys DesignWare Memory Card Interface driver. Select this option
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 4f3452afa6ca..3595f83e89dd 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,8 +9,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o 9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
10obj-$(CONFIG_MMC_SDHCI) += sdhci.o 10obj-$(CONFIG_MMC_SDHCI) += sdhci.o
11obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 11obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
12sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
12obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o 13obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
13obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o
14obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o 14obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
15obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o 15obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
16obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o 16obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 1e75309898b7..3a7e835a0033 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -446,7 +446,7 @@ out:
446 return loc; 446 return loc;
447} 447}
448 448
449static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot) 449static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
450{ 450{
451 struct dw_mci *host = slot->host; 451 struct dw_mci *host = slot->host;
452 struct dw_mci_exynos_priv_data *priv = host->priv; 452 struct dw_mci_exynos_priv_data *priv = host->priv;
@@ -461,7 +461,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
461 mci_writel(host, TMOUT, ~0); 461 mci_writel(host, TMOUT, ~0);
462 smpl = dw_mci_exynos_move_next_clksmpl(host); 462 smpl = dw_mci_exynos_move_next_clksmpl(host);
463 463
464 if (!mmc_send_tuning(mmc)) 464 if (!mmc_send_tuning(mmc, opcode, NULL))
465 candiates |= (1 << smpl); 465 candiates |= (1 << smpl);
466 466
467 } while (start_smpl != smpl); 467 } while (start_smpl != smpl);
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index ec6dbcdec693..7e1d13b68b06 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -59,6 +59,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
59 host->pdata = pdev->dev.platform_data; 59 host->pdata = pdev->dev.platform_data;
60 60
61 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 61 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
62 /* Get registers' physical base address */
63 host->phy_regs = (void *)(regs->start);
62 host->regs = devm_ioremap_resource(&pdev->dev, regs); 64 host->regs = devm_ioremap_resource(&pdev->dev, regs);
63 if (IS_ERR(host->regs)) 65 if (IS_ERR(host->regs))
64 return PTR_ERR(host->regs); 66 return PTR_ERR(host->regs);
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index bc76aa22473e..9becebeeccd1 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -13,12 +13,19 @@
13#include <linux/mmc/host.h> 13#include <linux/mmc/host.h>
14#include <linux/mmc/dw_mmc.h> 14#include <linux/mmc/dw_mmc.h>
15#include <linux/of_address.h> 15#include <linux/of_address.h>
16#include <linux/slab.h>
16 17
17#include "dw_mmc.h" 18#include "dw_mmc.h"
18#include "dw_mmc-pltfm.h" 19#include "dw_mmc-pltfm.h"
19 20
20#define RK3288_CLKGEN_DIV 2 21#define RK3288_CLKGEN_DIV 2
21 22
23struct dw_mci_rockchip_priv_data {
24 struct clk *drv_clk;
25 struct clk *sample_clk;
26 int default_sample_phase;
27};
28
22static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr) 29static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
23{ 30{
24 *cmdr |= SDMMC_CMD_USE_HOLD_REG; 31 *cmdr |= SDMMC_CMD_USE_HOLD_REG;
@@ -33,6 +40,7 @@ static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
33 40
34static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) 41static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
35{ 42{
43 struct dw_mci_rockchip_priv_data *priv = host->priv;
36 int ret; 44 int ret;
37 unsigned int cclkin; 45 unsigned int cclkin;
38 u32 bus_hz; 46 u32 bus_hz;
@@ -66,6 +74,158 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
66 /* force dw_mci_setup_bus() */ 74 /* force dw_mci_setup_bus() */
67 host->current_speed = 0; 75 host->current_speed = 0;
68 } 76 }
77
78 /* Make sure we use phases which we can enumerate with */
79 if (!IS_ERR(priv->sample_clk))
80 clk_set_phase(priv->sample_clk, priv->default_sample_phase);
81}
82
83#define NUM_PHASES 360
84#define TUNING_ITERATION_TO_PHASE(i) (DIV_ROUND_UP((i) * 360, NUM_PHASES))
85
86static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
87{
88 struct dw_mci *host = slot->host;
89 struct dw_mci_rockchip_priv_data *priv = host->priv;
90 struct mmc_host *mmc = slot->mmc;
91 int ret = 0;
92 int i;
93 bool v, prev_v = 0, first_v;
94 struct range_t {
95 int start;
96 int end; /* inclusive */
97 };
98 struct range_t *ranges;
99 unsigned int range_count = 0;
100 int longest_range_len = -1;
101 int longest_range = -1;
102 int middle_phase;
103
104 if (IS_ERR(priv->sample_clk)) {
105 dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n");
106 return -EIO;
107 }
108
109 ranges = kmalloc_array(NUM_PHASES / 2 + 1, sizeof(*ranges), GFP_KERNEL);
110 if (!ranges)
111 return -ENOMEM;
112
113 /* Try each phase and extract good ranges */
114 for (i = 0; i < NUM_PHASES; ) {
115 clk_set_phase(priv->sample_clk, TUNING_ITERATION_TO_PHASE(i));
116
117 v = !mmc_send_tuning(mmc, opcode, NULL);
118
119 if (i == 0)
120 first_v = v;
121
122 if ((!prev_v) && v) {
123 range_count++;
124 ranges[range_count-1].start = i;
125 }
126 if (v) {
127 ranges[range_count-1].end = i;
128 i++;
129 } else if (i == NUM_PHASES - 1) {
130 /* No extra skipping rules if we're at the end */
131 i++;
132 } else {
133 /*
134 * No need to check too close to an invalid
135 * one since testing bad phases is slow. Skip
136 * 20 degrees.
137 */
138 i += DIV_ROUND_UP(20 * NUM_PHASES, 360);
139
140 /* Always test the last one */
141 if (i >= NUM_PHASES)
142 i = NUM_PHASES - 1;
143 }
144
145 prev_v = v;
146 }
147
148 if (range_count == 0) {
149 dev_warn(host->dev, "All phases bad!");
150 ret = -EIO;
151 goto free;
152 }
153
154 /* wrap around case, merge the end points */
155 if ((range_count > 1) && first_v && v) {
156 ranges[0].start = ranges[range_count-1].start;
157 range_count--;
158 }
159
160 if (ranges[0].start == 0 && ranges[0].end == NUM_PHASES - 1) {
161 clk_set_phase(priv->sample_clk, priv->default_sample_phase);
162 dev_info(host->dev, "All phases work, using default phase %d.",
163 priv->default_sample_phase);
164 goto free;
165 }
166
167 /* Find the longest range */
168 for (i = 0; i < range_count; i++) {
169 int len = (ranges[i].end - ranges[i].start + 1);
170
171 if (len < 0)
172 len += NUM_PHASES;
173
174 if (longest_range_len < len) {
175 longest_range_len = len;
176 longest_range = i;
177 }
178
179 dev_dbg(host->dev, "Good phase range %d-%d (%d len)\n",
180 TUNING_ITERATION_TO_PHASE(ranges[i].start),
181 TUNING_ITERATION_TO_PHASE(ranges[i].end),
182 len
183 );
184 }
185
186 dev_dbg(host->dev, "Best phase range %d-%d (%d len)\n",
187 TUNING_ITERATION_TO_PHASE(ranges[longest_range].start),
188 TUNING_ITERATION_TO_PHASE(ranges[longest_range].end),
189 longest_range_len
190 );
191
192 middle_phase = ranges[longest_range].start + longest_range_len / 2;
193 middle_phase %= NUM_PHASES;
194 dev_info(host->dev, "Successfully tuned phase to %d\n",
195 TUNING_ITERATION_TO_PHASE(middle_phase));
196
197 clk_set_phase(priv->sample_clk,
198 TUNING_ITERATION_TO_PHASE(middle_phase));
199
200free:
201 kfree(ranges);
202 return ret;
203}
204
205static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
206{
207 struct device_node *np = host->dev->of_node;
208 struct dw_mci_rockchip_priv_data *priv;
209
210 priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
211 if (!priv)
212 return -ENOMEM;
213
214 if (of_property_read_u32(np, "rockchip,default-sample-phase",
215 &priv->default_sample_phase))
216 priv->default_sample_phase = 0;
217
218 priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
219 if (IS_ERR(priv->drv_clk))
220 dev_dbg(host->dev, "ciu_drv not available\n");
221
222 priv->sample_clk = devm_clk_get(host->dev, "ciu-sample");
223 if (IS_ERR(priv->sample_clk))
224 dev_dbg(host->dev, "ciu_sample not available\n");
225
226 host->priv = priv;
227
228 return 0;
69} 229}
70 230
71static int dw_mci_rockchip_init(struct dw_mci *host) 231static int dw_mci_rockchip_init(struct dw_mci *host)
@@ -95,6 +255,8 @@ static const struct dw_mci_drv_data rk3288_drv_data = {
95 .caps = dw_mci_rk3288_dwmmc_caps, 255 .caps = dw_mci_rk3288_dwmmc_caps,
96 .prepare_command = dw_mci_rockchip_prepare_command, 256 .prepare_command = dw_mci_rockchip_prepare_command,
97 .set_ios = dw_mci_rk3288_set_ios, 257 .set_ios = dw_mci_rk3288_set_ios,
258 .execute_tuning = dw_mci_rk3288_execute_tuning,
259 .parse_dt = dw_mci_rk3288_parse_dt,
98 .setup_clock = dw_mci_rk3288_setup_clock, 260 .setup_clock = dw_mci_rk3288_setup_clock,
99 .init = dw_mci_rockchip_init, 261 .init = dw_mci_rockchip_init,
100}; 262};
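
The Rockchip hunk above adds clock-phase tuning: sweep candidate sample phases, record which ones pass mmc_send_tuning(), merge the window that wraps past 359 degrees, then program the middle of the longest passing window. The standalone sketch below illustrates only that selection logic; it is not part of the patch, and phase_passes() with its simulated pass map is a made-up stand-in for the real tuning command.

/*
 * Illustrative sketch of the phase-window selection used by
 * dw_mci_rk3288_execute_tuning(); the pass map is simulated.
 */
#include <stdio.h>

#define NUM_PHASES 360

/* Hypothetical stand-in for issuing a tuning command at a given phase. */
static int phase_passes(int phase)
{
	/* pretend phases 300..359 and 0..40 are good (wraps around 0) */
	return phase >= 300 || phase <= 40;
}

int main(void)
{
	struct { int start, end; } ranges[NUM_PHASES / 2 + 1];
	int range_count = 0, prev_ok = 0, first_ok = 0, ok = 0;
	int i, best = 0, best_len = -1, middle;

	for (i = 0; i < NUM_PHASES; i++) {
		ok = phase_passes(i);
		if (i == 0)
			first_ok = ok;
		if (ok && !prev_ok)
			ranges[range_count++].start = i;
		if (ok)
			ranges[range_count - 1].end = i;
		prev_ok = ok;
	}
	if (!range_count)
		return 1;	/* all phases bad */

	/* merge the window that wraps around 359 -> 0 */
	if (range_count > 1 && first_ok && ok) {
		ranges[0].start = ranges[range_count - 1].start;
		range_count--;
	}

	for (i = 0; i < range_count; i++) {
		int len = ranges[i].end - ranges[i].start + 1;

		if (len < 0)
			len += NUM_PHASES;
		if (len > best_len) {
			best_len = len;
			best = i;
		}
	}

	middle = (ranges[best].start + best_len / 2) % NUM_PHASES;
	printf("tuned phase: %d (window length %d)\n", middle, best_len);
	return 0;
}

With the simulated map above this prints phase 350, the centre of the 300..40 window, matching what the driver would program via clk_set_phase().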
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index fcbf5524fd31..7a6cedbe48a8 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -56,7 +56,6 @@
56#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 56#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
57#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */ 57#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
58 58
59#ifdef CONFIG_MMC_DW_IDMAC
60#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 59#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
61 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
62 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
@@ -102,7 +101,6 @@ struct idmac_desc {
102 101
103/* Each descriptor can transfer up to 4KB of data in chained mode */ 102/* Each descriptor can transfer up to 4KB of data in chained mode */
104#define DW_MCI_DESC_DATA_LENGTH 0x1000 103#define DW_MCI_DESC_DATA_LENGTH 0x1000
105#endif /* CONFIG_MMC_DW_IDMAC */
106 104
107static bool dw_mci_reset(struct dw_mci *host); 105static bool dw_mci_reset(struct dw_mci *host);
108static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset); 106static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
@@ -407,7 +405,6 @@ static int dw_mci_get_dma_dir(struct mmc_data *data)
407 return DMA_FROM_DEVICE; 405 return DMA_FROM_DEVICE;
408} 406}
409 407
410#ifdef CONFIG_MMC_DW_IDMAC
411static void dw_mci_dma_cleanup(struct dw_mci *host) 408static void dw_mci_dma_cleanup(struct dw_mci *host)
412{ 409{
413 struct mmc_data *data = host->data; 410 struct mmc_data *data = host->data;
@@ -445,12 +442,21 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
445 mci_writel(host, BMOD, temp); 442 mci_writel(host, BMOD, temp);
446} 443}
447 444
448static void dw_mci_idmac_complete_dma(struct dw_mci *host) 445static void dw_mci_dmac_complete_dma(void *arg)
449{ 446{
447 struct dw_mci *host = arg;
450 struct mmc_data *data = host->data; 448 struct mmc_data *data = host->data;
451 449
452 dev_vdbg(host->dev, "DMA complete\n"); 450 dev_vdbg(host->dev, "DMA complete\n");
453 451
452 if ((host->use_dma == TRANS_MODE_EDMAC) &&
453 data && (data->flags & MMC_DATA_READ))
454 /* Invalidate cache after read */
455 dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
456 data->sg,
457 data->sg_len,
458 DMA_FROM_DEVICE);
459
454 host->dma_ops->cleanup(host); 460 host->dma_ops->cleanup(host);
455 461
456 /* 462 /*
@@ -564,7 +570,7 @@ static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
564 wmb(); /* drain writebuffer */ 570 wmb(); /* drain writebuffer */
565} 571}
566 572
567static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 573static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
568{ 574{
569 u32 temp; 575 u32 temp;
570 576
@@ -589,6 +595,8 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
589 595
590 /* Start it running */ 596 /* Start it running */
591 mci_writel(host, PLDMND, 1); 597 mci_writel(host, PLDMND, 1);
598
599 return 0;
592} 600}
593 601
594static int dw_mci_idmac_init(struct dw_mci *host) 602static int dw_mci_idmac_init(struct dw_mci *host)
@@ -669,10 +677,110 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
669 .init = dw_mci_idmac_init, 677 .init = dw_mci_idmac_init,
670 .start = dw_mci_idmac_start_dma, 678 .start = dw_mci_idmac_start_dma,
671 .stop = dw_mci_idmac_stop_dma, 679 .stop = dw_mci_idmac_stop_dma,
672 .complete = dw_mci_idmac_complete_dma, 680 .complete = dw_mci_dmac_complete_dma,
681 .cleanup = dw_mci_dma_cleanup,
682};
683
684static void dw_mci_edmac_stop_dma(struct dw_mci *host)
685{
686 dmaengine_terminate_all(host->dms->ch);
687}
688
689static int dw_mci_edmac_start_dma(struct dw_mci *host,
690 unsigned int sg_len)
691{
692 struct dma_slave_config cfg;
693 struct dma_async_tx_descriptor *desc = NULL;
694 struct scatterlist *sgl = host->data->sg;
695 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
696 u32 sg_elems = host->data->sg_len;
697 u32 fifoth_val;
698 u32 fifo_offset = host->fifo_reg - host->regs;
699 int ret = 0;
700
701 /* Set external dma config: burst size, burst width */
702 cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset);
703 cfg.src_addr = cfg.dst_addr;
704 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
705 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
706
707 /* Match burst msize with external dma config */
708 fifoth_val = mci_readl(host, FIFOTH);
709 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
710 cfg.src_maxburst = cfg.dst_maxburst;
711
712 if (host->data->flags & MMC_DATA_WRITE)
713 cfg.direction = DMA_MEM_TO_DEV;
714 else
715 cfg.direction = DMA_DEV_TO_MEM;
716
717 ret = dmaengine_slave_config(host->dms->ch, &cfg);
718 if (ret) {
719 dev_err(host->dev, "Failed to config edmac.\n");
720 return -EBUSY;
721 }
722
723 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
724 sg_len, cfg.direction,
725 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
726 if (!desc) {
727 dev_err(host->dev, "Can't prepare slave sg.\n");
728 return -EBUSY;
729 }
730
731 /* Set dw_mci_dmac_complete_dma as callback */
732 desc->callback = dw_mci_dmac_complete_dma;
733 desc->callback_param = (void *)host;
734 dmaengine_submit(desc);
735
736 /* Flush cache before write */
737 if (host->data->flags & MMC_DATA_WRITE)
738 dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
739 sg_elems, DMA_TO_DEVICE);
740
741 dma_async_issue_pending(host->dms->ch);
742
743 return 0;
744}
745
746static int dw_mci_edmac_init(struct dw_mci *host)
747{
748 /* Request external dma channel */
749 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
750 if (!host->dms)
751 return -ENOMEM;
752
753 host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
754 if (!host->dms->ch) {
755 dev_err(host->dev, "Failed to get external DMA channel.\n");
756 kfree(host->dms);
757 host->dms = NULL;
758 return -ENXIO;
759 }
760
761 return 0;
762}
763
764static void dw_mci_edmac_exit(struct dw_mci *host)
765{
766 if (host->dms) {
767 if (host->dms->ch) {
768 dma_release_channel(host->dms->ch);
769 host->dms->ch = NULL;
770 }
771 kfree(host->dms);
772 host->dms = NULL;
773 }
774}
775
776static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
777 .init = dw_mci_edmac_init,
778 .exit = dw_mci_edmac_exit,
779 .start = dw_mci_edmac_start_dma,
780 .stop = dw_mci_edmac_stop_dma,
781 .complete = dw_mci_dmac_complete_dma,
673 .cleanup = dw_mci_dma_cleanup, 782 .cleanup = dw_mci_dma_cleanup,
674}; 783};
675#endif /* CONFIG_MMC_DW_IDMAC */
676 784
677static int dw_mci_pre_dma_transfer(struct dw_mci *host, 785static int dw_mci_pre_dma_transfer(struct dw_mci *host,
678 struct mmc_data *data, 786 struct mmc_data *data,
@@ -752,7 +860,6 @@ static void dw_mci_post_req(struct mmc_host *mmc,
752 860
753static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 861static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
754{ 862{
755#ifdef CONFIG_MMC_DW_IDMAC
756 unsigned int blksz = data->blksz; 863 unsigned int blksz = data->blksz;
757 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 864 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
758 u32 fifo_width = 1 << host->data_shift; 865 u32 fifo_width = 1 << host->data_shift;
@@ -760,6 +867,10 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
760 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 867 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
761 int idx = ARRAY_SIZE(mszs) - 1; 868 int idx = ARRAY_SIZE(mszs) - 1;
762 869
 870 /* pio should skip this scenario */
871 if (!host->use_dma)
872 return;
873
763 tx_wmark = (host->fifo_depth) / 2; 874 tx_wmark = (host->fifo_depth) / 2;
764 tx_wmark_invers = host->fifo_depth - tx_wmark; 875 tx_wmark_invers = host->fifo_depth - tx_wmark;
765 876
@@ -788,7 +899,6 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
788done: 899done:
789 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 900 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
790 mci_writel(host, FIFOTH, fifoth_val); 901 mci_writel(host, FIFOTH, fifoth_val);
791#endif
792} 902}
793 903
794static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) 904static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
@@ -850,10 +960,12 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
850 960
851 host->using_dma = 1; 961 host->using_dma = 1;
852 962
853 dev_vdbg(host->dev, 963 if (host->use_dma == TRANS_MODE_IDMAC)
854 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 964 dev_vdbg(host->dev,
855 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 965 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
856 sg_len); 966 (unsigned long)host->sg_cpu,
967 (unsigned long)host->sg_dma,
968 sg_len);
857 969
858 /* 970 /*
859 * Decide the MSIZE and RX/TX Watermark. 971 * Decide the MSIZE and RX/TX Watermark.
@@ -875,7 +987,11 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
875 mci_writel(host, INTMASK, temp); 987 mci_writel(host, INTMASK, temp);
876 spin_unlock_irqrestore(&host->irq_lock, irqflags); 988 spin_unlock_irqrestore(&host->irq_lock, irqflags);
877 989
878 host->dma_ops->start(host, sg_len); 990 if (host->dma_ops->start(host, sg_len)) {
991 /* We can't do DMA */
992 dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
993 return -ENODEV;
994 }
879 995
880 return 0; 996 return 0;
881} 997}
@@ -1177,6 +1293,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1177 1293
1178 /* DDR mode set */ 1294 /* DDR mode set */
1179 if (ios->timing == MMC_TIMING_MMC_DDR52 || 1295 if (ios->timing == MMC_TIMING_MMC_DDR52 ||
1296 ios->timing == MMC_TIMING_UHS_DDR50 ||
1180 ios->timing == MMC_TIMING_MMC_HS400) 1297 ios->timing == MMC_TIMING_MMC_HS400)
1181 regs |= ((0x1 << slot->id) << 16); 1298 regs |= ((0x1 << slot->id) << 16);
1182 else 1299 else
@@ -1279,7 +1396,6 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1279 const struct dw_mci_drv_data *drv_data = host->drv_data; 1396 const struct dw_mci_drv_data *drv_data = host->drv_data;
1280 u32 uhs; 1397 u32 uhs;
1281 u32 v18 = SDMMC_UHS_18V << slot->id; 1398 u32 v18 = SDMMC_UHS_18V << slot->id;
1282 int min_uv, max_uv;
1283 int ret; 1399 int ret;
1284 1400
1285 if (drv_data && drv_data->switch_voltage) 1401 if (drv_data && drv_data->switch_voltage)
@@ -1291,22 +1407,18 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1291 * does no harm but you need to set the regulator directly. Try both. 1407 * does no harm but you need to set the regulator directly. Try both.
1292 */ 1408 */
1293 uhs = mci_readl(host, UHS_REG); 1409 uhs = mci_readl(host, UHS_REG);
1294 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { 1410 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1295 min_uv = 2700000;
1296 max_uv = 3600000;
1297 uhs &= ~v18; 1411 uhs &= ~v18;
1298 } else { 1412 else
1299 min_uv = 1700000;
1300 max_uv = 1950000;
1301 uhs |= v18; 1413 uhs |= v18;
1302 } 1414
1303 if (!IS_ERR(mmc->supply.vqmmc)) { 1415 if (!IS_ERR(mmc->supply.vqmmc)) {
1304 ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv); 1416 ret = mmc_regulator_set_vqmmc(mmc, ios);
1305 1417
1306 if (ret) { 1418 if (ret) {
1307 dev_dbg(&mmc->class_dev, 1419 dev_dbg(&mmc->class_dev,
1308 "Regulator set error %d: %d - %d\n", 1420 "Regulator set error %d - %s V\n",
1309 ret, min_uv, max_uv); 1421 ret, uhs & v18 ? "1.8" : "3.3");
1310 return ret; 1422 return ret;
1311 } 1423 }
1312 } 1424 }
@@ -1427,7 +1539,7 @@ static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1427 int err = -EINVAL; 1539 int err = -EINVAL;
1428 1540
1429 if (drv_data && drv_data->execute_tuning) 1541 if (drv_data && drv_data->execute_tuning)
1430 err = drv_data->execute_tuning(slot); 1542 err = drv_data->execute_tuning(slot, opcode);
1431 return err; 1543 return err;
1432} 1544}
1433 1545
@@ -2343,15 +2455,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2343 2455
2344 } 2456 }
2345 2457
2346#ifdef CONFIG_MMC_DW_IDMAC 2458 if (host->use_dma != TRANS_MODE_IDMAC)
2347 /* Handle DMA interrupts */ 2459 return IRQ_HANDLED;
2460
2461 /* Handle IDMA interrupts */
2348 if (host->dma_64bit_address == 1) { 2462 if (host->dma_64bit_address == 1) {
2349 pending = mci_readl(host, IDSTS64); 2463 pending = mci_readl(host, IDSTS64);
2350 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2464 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2351 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2465 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2352 SDMMC_IDMAC_INT_RI); 2466 SDMMC_IDMAC_INT_RI);
2353 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2467 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2354 host->dma_ops->complete(host); 2468 host->dma_ops->complete((void *)host);
2355 } 2469 }
2356 } else { 2470 } else {
2357 pending = mci_readl(host, IDSTS); 2471 pending = mci_readl(host, IDSTS);
@@ -2359,10 +2473,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2359 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2473 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2360 SDMMC_IDMAC_INT_RI); 2474 SDMMC_IDMAC_INT_RI);
2361 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2475 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2362 host->dma_ops->complete(host); 2476 host->dma_ops->complete((void *)host);
2363 } 2477 }
2364 } 2478 }
2365#endif
2366 2479
2367 return IRQ_HANDLED; 2480 return IRQ_HANDLED;
2368} 2481}
@@ -2471,13 +2584,21 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2471 goto err_host_allocated; 2584 goto err_host_allocated;
2472 2585
2473 /* Useful defaults if platform data is unset. */ 2586 /* Useful defaults if platform data is unset. */
2474 if (host->use_dma) { 2587 if (host->use_dma == TRANS_MODE_IDMAC) {
2475 mmc->max_segs = host->ring_size; 2588 mmc->max_segs = host->ring_size;
2476 mmc->max_blk_size = 65536; 2589 mmc->max_blk_size = 65536;
2477 mmc->max_seg_size = 0x1000; 2590 mmc->max_seg_size = 0x1000;
2478 mmc->max_req_size = mmc->max_seg_size * host->ring_size; 2591 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2479 mmc->max_blk_count = mmc->max_req_size / 512; 2592 mmc->max_blk_count = mmc->max_req_size / 512;
2593 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2594 mmc->max_segs = 64;
2595 mmc->max_blk_size = 65536;
2596 mmc->max_blk_count = 65535;
2597 mmc->max_req_size =
2598 mmc->max_blk_size * mmc->max_blk_count;
2599 mmc->max_seg_size = mmc->max_req_size;
2480 } else { 2600 } else {
2601 /* TRANS_MODE_PIO */
2481 mmc->max_segs = 64; 2602 mmc->max_segs = 64;
2482 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 2603 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2483 mmc->max_blk_count = 512; 2604 mmc->max_blk_count = 512;
@@ -2517,38 +2638,74 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2517static void dw_mci_init_dma(struct dw_mci *host) 2638static void dw_mci_init_dma(struct dw_mci *host)
2518{ 2639{
2519 int addr_config; 2640 int addr_config;
2520 /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */ 2641 struct device *dev = host->dev;
2521 addr_config = (mci_readl(host, HCON) >> 27) & 0x01; 2642 struct device_node *np = dev->of_node;
2522
2523 if (addr_config == 1) {
2524 /* host supports IDMAC in 64-bit address mode */
2525 host->dma_64bit_address = 1;
2526 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
2527 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2528 dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
2529 } else {
2530 /* host supports IDMAC in 32-bit address mode */
2531 host->dma_64bit_address = 0;
2532 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
2533 }
2534 2643
2535 /* Alloc memory for sg translation */ 2644 /*
2536 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2645 * Check tansfer mode from HCON[17:16]
2537 &host->sg_dma, GFP_KERNEL); 2646 * Clear the ambiguous description of dw_mmc databook:
2538 if (!host->sg_cpu) { 2647 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
2539 dev_err(host->dev, "%s: could not alloc DMA memory\n", 2648 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2540 __func__); 2649 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2650 * 2b'11: Non DW DMA Interface -> pio only
2651 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
2652 * simpler request/acknowledge handshake mechanism and both of them
2653 * are regarded as external dma master for dw_mmc.
2654 */
2655 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2656 if (host->use_dma == DMA_INTERFACE_IDMA) {
2657 host->use_dma = TRANS_MODE_IDMAC;
2658 } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2659 host->use_dma == DMA_INTERFACE_GDMA) {
2660 host->use_dma = TRANS_MODE_EDMAC;
2661 } else {
2541 goto no_dma; 2662 goto no_dma;
2542 } 2663 }
2543 2664
2544 /* Determine which DMA interface to use */ 2665 /* Determine which DMA interface to use */
2545#ifdef CONFIG_MMC_DW_IDMAC 2666 if (host->use_dma == TRANS_MODE_IDMAC) {
2546 host->dma_ops = &dw_mci_idmac_ops; 2667 /*
2547 dev_info(host->dev, "Using internal DMA controller.\n"); 2668 * Check ADDR_CONFIG bit in HCON to find
2548#endif 2669 * IDMAC address bus width
2670 */
2671 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
2672
2673 if (addr_config == 1) {
2674 /* host supports IDMAC in 64-bit address mode */
2675 host->dma_64bit_address = 1;
2676 dev_info(host->dev,
2677 "IDMAC supports 64-bit address mode.\n");
2678 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2679 dma_set_coherent_mask(host->dev,
2680 DMA_BIT_MASK(64));
2681 } else {
2682 /* host supports IDMAC in 32-bit address mode */
2683 host->dma_64bit_address = 0;
2684 dev_info(host->dev,
2685 "IDMAC supports 32-bit address mode.\n");
2686 }
2549 2687
2550 if (!host->dma_ops) 2688 /* Alloc memory for sg translation */
2551 goto no_dma; 2689 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2690 &host->sg_dma, GFP_KERNEL);
2691 if (!host->sg_cpu) {
2692 dev_err(host->dev,
2693 "%s: could not alloc DMA memory\n",
2694 __func__);
2695 goto no_dma;
2696 }
2697
2698 host->dma_ops = &dw_mci_idmac_ops;
2699 dev_info(host->dev, "Using internal DMA controller.\n");
2700 } else {
2701 /* TRANS_MODE_EDMAC: check dma bindings again */
2702 if ((of_property_count_strings(np, "dma-names") < 0) ||
2703 (!of_find_property(np, "dmas", NULL))) {
2704 goto no_dma;
2705 }
2706 host->dma_ops = &dw_mci_edmac_ops;
2707 dev_info(host->dev, "Using external DMA controller.\n");
2708 }
2552 2709
2553 if (host->dma_ops->init && host->dma_ops->start && 2710 if (host->dma_ops->init && host->dma_ops->start &&
2554 host->dma_ops->stop && host->dma_ops->cleanup) { 2711 host->dma_ops->stop && host->dma_ops->cleanup) {
@@ -2562,12 +2719,11 @@ static void dw_mci_init_dma(struct dw_mci *host)
2562 goto no_dma; 2719 goto no_dma;
2563 } 2720 }
2564 2721
2565 host->use_dma = 1;
2566 return; 2722 return;
2567 2723
2568no_dma: 2724no_dma:
2569 dev_info(host->dev, "Using PIO mode.\n"); 2725 dev_info(host->dev, "Using PIO mode.\n");
2570 host->use_dma = 0; 2726 host->use_dma = TRANS_MODE_PIO;
2571} 2727}
2572 2728
2573static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 2729static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
@@ -2650,10 +2806,9 @@ static bool dw_mci_reset(struct dw_mci *host)
2650 } 2806 }
2651 } 2807 }
2652 2808
2653#if IS_ENABLED(CONFIG_MMC_DW_IDMAC) 2809 if (host->use_dma == TRANS_MODE_IDMAC)
2654 /* It is also recommended that we reset and reprogram idmac */ 2810 /* It is also recommended that we reset and reprogram idmac */
2655 dw_mci_idmac_reset(host); 2811 dw_mci_idmac_reset(host);
2656#endif
2657 2812
2658 ret = true; 2813 ret = true;
2659 2814
@@ -2890,7 +3045,7 @@ int dw_mci_probe(struct dw_mci *host)
2890 * Get the host data width - this assumes that HCON has been set with 3045 * Get the host data width - this assumes that HCON has been set with
2891 * the correct values. 3046 * the correct values.
2892 */ 3047 */
2893 i = (mci_readl(host, HCON) >> 7) & 0x7; 3048 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
2894 if (!i) { 3049 if (!i) {
2895 host->push_data = dw_mci_push_data16; 3050 host->push_data = dw_mci_push_data16;
2896 host->pull_data = dw_mci_pull_data16; 3051 host->pull_data = dw_mci_pull_data16;
@@ -2972,7 +3127,7 @@ int dw_mci_probe(struct dw_mci *host)
2972 if (host->pdata->num_slots) 3127 if (host->pdata->num_slots)
2973 host->num_slots = host->pdata->num_slots; 3128 host->num_slots = host->pdata->num_slots;
2974 else 3129 else
2975 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; 3130 host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));
2976 3131
2977 /* 3132 /*
2978 * Enable interrupts for command done, data over, data empty, 3133 * Enable interrupts for command done, data over, data empty,
@@ -3067,6 +3222,9 @@ EXPORT_SYMBOL(dw_mci_remove);
3067 */ 3222 */
3068int dw_mci_suspend(struct dw_mci *host) 3223int dw_mci_suspend(struct dw_mci *host)
3069{ 3224{
3225 if (host->use_dma && host->dma_ops->exit)
3226 host->dma_ops->exit(host);
3227
3070 return 0; 3228 return 0;
3071} 3229}
3072EXPORT_SYMBOL(dw_mci_suspend); 3230EXPORT_SYMBOL(dw_mci_suspend);
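
For readers unfamiliar with the external-DMA (EDMAC) path introduced above, the following condenses dw_mci_edmac_start_dma() into a minimal helper built only from standard dmaengine calls. It is illustrative and not part of the patch: the helper name, the fixed burst of 16 and the 4-byte FIFO width are assumptions chosen for clarity (the driver derives the burst from FIFOTH and the FIFO address from host->phy_regs).

/*
 * Minimal sketch of a dmaengine slave-sg transfer, mirroring the EDMAC
 * start path above. Illustrative only; not part of the patch.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_start_slave_sg(struct dma_chan *ch, struct scatterlist *sgl,
				  unsigned int sg_len, dma_addr_t fifo_addr,
				  bool is_write)
{
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *desc;

	cfg.direction      = is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr       = fifo_addr;	/* device FIFO address, assumed */
	cfg.dst_addr       = fifo_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst   = 16;	/* dw_mmc derives this from FIFOTH */
	cfg.dst_maxburst   = 16;

	if (dmaengine_slave_config(ch, &cfg))
		return -EBUSY;

	desc = dmaengine_prep_slave_sg(ch, sgl, sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	/* a completion callback would be installed here before submit */
	dmaengine_submit(desc);
	dma_async_issue_pending(ch);
	return 0;
}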
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 8ce4674730a6..f695b58f0613 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -148,6 +148,15 @@
148#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ 148#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
149 ((r) & 0xFFF) << 16 | \ 149 ((r) & 0xFFF) << 16 | \
150 ((t) & 0xFFF)) 150 ((t) & 0xFFF))
151/* HCON register defines */
152#define DMA_INTERFACE_IDMA (0x0)
153#define DMA_INTERFACE_DWDMA (0x1)
154#define DMA_INTERFACE_GDMA (0x2)
155#define DMA_INTERFACE_NODMA (0x3)
156#define SDMMC_GET_TRANS_MODE(x) (((x)>>16) & 0x3)
157#define SDMMC_GET_SLOT_NUM(x) ((((x)>>1) & 0x1F) + 1)
158#define SDMMC_GET_HDATA_WIDTH(x) (((x)>>7) & 0x7)
159#define SDMMC_GET_ADDR_CONFIG(x) (((x)>>27) & 0x1)
151/* Internal DMAC interrupt defines */ 160/* Internal DMAC interrupt defines */
152#define SDMMC_IDMAC_INT_AI BIT(9) 161#define SDMMC_IDMAC_INT_AI BIT(9)
153#define SDMMC_IDMAC_INT_NI BIT(8) 162#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -163,7 +172,7 @@
163/* Version ID register define */ 172/* Version ID register define */
164#define SDMMC_GET_VERID(x) ((x) & 0xFFFF) 173#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
165/* Card read threshold */ 174/* Card read threshold */
166#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x)) 175#define SDMMC_SET_RD_THLD(v, x) (((v) & 0xFFF) << 16 | (x))
167#define SDMMC_UHS_18V BIT(0) 176#define SDMMC_UHS_18V BIT(0)
168/* All ctrl reset bits */ 177/* All ctrl reset bits */
169#define SDMMC_CTRL_ALL_RESET_FLAGS \ 178#define SDMMC_CTRL_ALL_RESET_FLAGS \
@@ -281,7 +290,7 @@ struct dw_mci_drv_data {
281 void (*prepare_command)(struct dw_mci *host, u32 *cmdr); 290 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
282 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); 291 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
283 int (*parse_dt)(struct dw_mci *host); 292 int (*parse_dt)(struct dw_mci *host);
284 int (*execute_tuning)(struct dw_mci_slot *slot); 293 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
285 int (*prepare_hs400_tuning)(struct dw_mci *host, 294 int (*prepare_hs400_tuning)(struct dw_mci *host,
286 struct mmc_ios *ios); 295 struct mmc_ios *ios);
287 int (*switch_voltage)(struct mmc_host *mmc, 296 int (*switch_voltage)(struct mmc_host *mmc,
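
The dw_mmc.h hunk above replaces open-coded HCON shifts with named helpers. The standalone sketch below shows how those macros decode a register value; it is not part of the patch, and the sample HCON value is made up for illustration.

/*
 * Decoding a sample HCON value with the new helper macros.
 * Illustrative only; the value 0x08e44c81 is hypothetical.
 */
#include <stdio.h>

#define SDMMC_GET_TRANS_MODE(x)  (((x) >> 16) & 0x3)
#define SDMMC_GET_SLOT_NUM(x)    ((((x) >> 1) & 0x1F) + 1)
#define SDMMC_GET_HDATA_WIDTH(x) (((x) >> 7) & 0x7)
#define SDMMC_GET_ADDR_CONFIG(x) (((x) >> 27) & 0x1)

int main(void)
{
	unsigned int hcon = 0x08e44c81;	/* hypothetical HCON readout */

	printf("trans mode : %u (0=IDMAC, 1=DW-DMA, 2=generic DMA, 3=no DMA)\n",
	       SDMMC_GET_TRANS_MODE(hcon));
	printf("slots      : %u\n", SDMMC_GET_SLOT_NUM(hcon));
	printf("data width : %u (dw_mmc probe treats 0 as 16-bit, 1 as 32-bit, 2 as 64-bit)\n",
	       SDMMC_GET_HDATA_WIDTH(hcon));
	printf("64-bit IDMAC address mode: %u\n", SDMMC_GET_ADDR_CONFIG(hcon));
	return 0;
}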
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index ae19d83bb9de..8ee11f4120fc 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1511,6 +1511,7 @@ static const struct of_device_id mmc_spi_of_match_table[] = {
1511 { .compatible = "mmc-spi-slot", }, 1511 { .compatible = "mmc-spi-slot", },
1512 {}, 1512 {},
1513}; 1513};
1514MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
1514 1515
1515static struct spi_driver mmc_spi_driver = { 1516static struct spi_driver mmc_spi_driver = {
1516 .driver = { 1517 .driver = {
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 006f1862444b..79905ce895ad 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -711,6 +711,7 @@ static const struct of_device_id moxart_mmc_match[] = {
711 { .compatible = "faraday,ftsdc010" }, 711 { .compatible = "faraday,ftsdc010" },
712 { } 712 { }
713}; 713};
714MODULE_DEVICE_TABLE(of, moxart_mmc_match);
714 715
715static struct platform_driver moxart_mmc_driver = { 716static struct platform_driver moxart_mmc_driver = {
716 .probe = moxart_probe, 717 .probe = moxart_probe,
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 7153500dd007..39568cc29a2a 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -26,6 +26,7 @@
26#include <linux/pm.h> 26#include <linux/pm.h>
27#include <linux/pm_runtime.h> 27#include <linux/pm_runtime.h>
28#include <linux/regulator/consumer.h> 28#include <linux/regulator/consumer.h>
29#include <linux/slab.h>
29#include <linux/spinlock.h> 30#include <linux/spinlock.h>
30 31
31#include <linux/mmc/card.h> 32#include <linux/mmc/card.h>
@@ -64,6 +65,7 @@
64#define SDC_RESP2 0x48 65#define SDC_RESP2 0x48
65#define SDC_RESP3 0x4c 66#define SDC_RESP3 0x4c
66#define SDC_BLK_NUM 0x50 67#define SDC_BLK_NUM 0x50
68#define EMMC_IOCON 0x7c
67#define SDC_ACMD_RESP 0x80 69#define SDC_ACMD_RESP 0x80
68#define MSDC_DMA_SA 0x90 70#define MSDC_DMA_SA 0x90
69#define MSDC_DMA_CTRL 0x98 71#define MSDC_DMA_CTRL 0x98
@@ -71,6 +73,8 @@
71#define MSDC_PATCH_BIT 0xb0 73#define MSDC_PATCH_BIT 0xb0
72#define MSDC_PATCH_BIT1 0xb4 74#define MSDC_PATCH_BIT1 0xb4
73#define MSDC_PAD_TUNE 0xec 75#define MSDC_PAD_TUNE 0xec
76#define PAD_DS_TUNE 0x188
77#define EMMC50_CFG0 0x208
74 78
75/*--------------------------------------------------------------------------*/ 79/*--------------------------------------------------------------------------*/
76/* Register Mask */ 80/* Register Mask */
@@ -87,6 +91,7 @@
87#define MSDC_CFG_CKSTB (0x1 << 7) /* R */ 91#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
88#define MSDC_CFG_CKDIV (0xff << 8) /* RW */ 92#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
89#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ 93#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
94#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
90 95
91/* MSDC_IOCON mask */ 96/* MSDC_IOCON mask */
92#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ 97#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
@@ -204,6 +209,17 @@
204#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ 209#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
205#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ 210#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
206 211
212#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
213#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
214
215#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
216#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
217#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
218
219#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
220#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
221#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
222
207#define REQ_CMD_EIO (0x1 << 0) 223#define REQ_CMD_EIO (0x1 << 0)
208#define REQ_CMD_TMO (0x1 << 1) 224#define REQ_CMD_TMO (0x1 << 1)
209#define REQ_DAT_ERR (0x1 << 2) 225#define REQ_DAT_ERR (0x1 << 2)
@@ -219,6 +235,7 @@
219#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */ 235#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
220#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */ 236#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */
221 237
238#define PAD_DELAY_MAX 32 /* PAD delay cells */
222/*--------------------------------------------------------------------------*/ 239/*--------------------------------------------------------------------------*/
223/* Descriptor Structure */ 240/* Descriptor Structure */
224/*--------------------------------------------------------------------------*/ 241/*--------------------------------------------------------------------------*/
@@ -265,6 +282,14 @@ struct msdc_save_para {
265 u32 pad_tune; 282 u32 pad_tune;
266 u32 patch_bit0; 283 u32 patch_bit0;
267 u32 patch_bit1; 284 u32 patch_bit1;
285 u32 pad_ds_tune;
286 u32 emmc50_cfg0;
287};
288
289struct msdc_delay_phase {
290 u8 maxlen;
291 u8 start;
292 u8 final_phase;
268}; 293};
269 294
270struct msdc_host { 295struct msdc_host {
@@ -297,8 +322,9 @@ struct msdc_host {
297 u32 mclk; /* mmc subsystem clock frequency */ 322 u32 mclk; /* mmc subsystem clock frequency */
298 u32 src_clk_freq; /* source clock frequency */ 323 u32 src_clk_freq; /* source clock frequency */
299 u32 sclk; /* SD/MS bus clock frequency */ 324 u32 sclk; /* SD/MS bus clock frequency */
300 bool ddr; 325 unsigned char timing;
301 bool vqmmc_enabled; 326 bool vqmmc_enabled;
327 u32 hs400_ds_delay;
302 struct msdc_save_para save_para; /* used when gate HCLK */ 328 struct msdc_save_para save_para; /* used when gate HCLK */
303}; 329};
304 330
@@ -353,7 +379,10 @@ static void msdc_reset_hw(struct msdc_host *host)
353static void msdc_cmd_next(struct msdc_host *host, 379static void msdc_cmd_next(struct msdc_host *host,
354 struct mmc_request *mrq, struct mmc_command *cmd); 380 struct mmc_request *mrq, struct mmc_command *cmd);
355 381
356static u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | 382static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
383 MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
384 MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
385static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
357 MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR | 386 MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
358 MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT; 387 MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
359 388
@@ -485,7 +514,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
485 cpu_relax(); 514 cpu_relax();
486} 515}
487 516
488static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz) 517static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
489{ 518{
490 u32 mode; 519 u32 mode;
491 u32 flags; 520 u32 flags;
@@ -501,8 +530,15 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
501 530
502 flags = readl(host->base + MSDC_INTEN); 531 flags = readl(host->base + MSDC_INTEN);
503 sdr_clr_bits(host->base + MSDC_INTEN, flags); 532 sdr_clr_bits(host->base + MSDC_INTEN, flags);
504 if (ddr) { /* may need to modify later */ 533 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
505 mode = 0x2; /* ddr mode and use divisor */ 534 if (timing == MMC_TIMING_UHS_DDR50 ||
535 timing == MMC_TIMING_MMC_DDR52 ||
536 timing == MMC_TIMING_MMC_HS400) {
537 if (timing == MMC_TIMING_MMC_HS400)
538 mode = 0x3;
539 else
540 mode = 0x2; /* ddr mode and use divisor */
541
506 if (hz >= (host->src_clk_freq >> 2)) { 542 if (hz >= (host->src_clk_freq >> 2)) {
507 div = 0; /* mean div = 1/4 */ 543 div = 0; /* mean div = 1/4 */
508 sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */ 544 sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
@@ -511,6 +547,14 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
511 sclk = (host->src_clk_freq >> 2) / div; 547 sclk = (host->src_clk_freq >> 2) / div;
512 div = (div >> 1); 548 div = (div >> 1);
513 } 549 }
550
551 if (timing == MMC_TIMING_MMC_HS400 &&
552 hz >= (host->src_clk_freq >> 1)) {
553 sdr_set_bits(host->base + MSDC_CFG,
554 MSDC_CFG_HS400_CK_MODE);
555 sclk = host->src_clk_freq >> 1;
 556 div = 0; /* div is ignored when bit18 is set */
557 }
514 } else if (hz >= host->src_clk_freq) { 558 } else if (hz >= host->src_clk_freq) {
515 mode = 0x1; /* no divisor */ 559 mode = 0x1; /* no divisor */
516 div = 0; 560 div = 0;
@@ -532,12 +576,12 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
532 cpu_relax(); 576 cpu_relax();
533 host->sclk = sclk; 577 host->sclk = sclk;
534 host->mclk = hz; 578 host->mclk = hz;
535 host->ddr = ddr; 579 host->timing = timing;
536 /* need because clk changed. */ 580 /* need because clk changed. */
537 msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); 581 msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
538 sdr_set_bits(host->base + MSDC_INTEN, flags); 582 sdr_set_bits(host->base + MSDC_INTEN, flags);
539 583
540 dev_dbg(host->dev, "sclk: %d, ddr: %d\n", host->sclk, ddr); 584 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
541} 585}
542 586
543static inline u32 msdc_cmd_find_resp(struct msdc_host *host, 587static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
@@ -725,11 +769,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
725 if (done) 769 if (done)
726 return true; 770 return true;
727 771
728 sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY | 772 sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
729 MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO |
730 MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR |
731 MSDC_INTEN_ACMDTMO);
732 writel(cmd->arg, host->base + SDC_ARG);
733 773
734 if (cmd->flags & MMC_RSP_PRESENT) { 774 if (cmd->flags & MMC_RSP_PRESENT) {
735 if (cmd->flags & MMC_RSP_136) { 775 if (cmd->flags & MMC_RSP_136) {
@@ -819,10 +859,7 @@ static void msdc_start_command(struct msdc_host *host,
819 rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); 859 rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
820 mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); 860 mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
821 861
822 sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY | 862 sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
823 MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO |
824 MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR |
825 MSDC_INTEN_ACMDTMO);
826 writel(cmd->arg, host->base + SDC_ARG); 863 writel(cmd->arg, host->base + SDC_ARG);
827 writel(rawcmd, host->base + SDC_CMD); 864 writel(rawcmd, host->base + SDC_CMD);
828} 865}
@@ -896,7 +933,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
896 struct mmc_request *mrq, struct mmc_data *data) 933 struct mmc_request *mrq, struct mmc_data *data)
897{ 934{
898 if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error && 935 if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
899 (!data->bytes_xfered || !mrq->sbc)) 936 !mrq->sbc)
900 msdc_start_command(host, mrq, mrq->stop); 937 msdc_start_command(host, mrq, mrq->stop);
901 else 938 else
902 msdc_request_done(host, mrq); 939 msdc_request_done(host, mrq);
@@ -942,6 +979,8 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
942 979
943 if (events & MSDC_INT_DATTMO) 980 if (events & MSDC_INT_DATTMO)
944 data->error = -ETIMEDOUT; 981 data->error = -ETIMEDOUT;
982 else if (events & MSDC_INT_DATCRCERR)
983 data->error = -EILSEQ;
945 984
946 dev_err(host->dev, "%s: cmd=%d; blocks=%d", 985 dev_err(host->dev, "%s: cmd=%d; blocks=%d",
947 __func__, mrq->cmd->opcode, data->blocks); 986 __func__, mrq->cmd->opcode, data->blocks);
@@ -1113,10 +1152,12 @@ static void msdc_init_hw(struct msdc_host *host)
1113 1152
1114 writel(0, host->base + MSDC_PAD_TUNE); 1153 writel(0, host->base + MSDC_PAD_TUNE);
1115 writel(0, host->base + MSDC_IOCON); 1154 writel(0, host->base + MSDC_IOCON);
1116 sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); 1155 sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
1117 writel(0x403c004f, host->base + MSDC_PATCH_BIT); 1156 writel(0x403c0046, host->base + MSDC_PATCH_BIT);
1118 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1); 1157 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
1119 writel(0xffff0089, host->base + MSDC_PATCH_BIT1); 1158 writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
1159 sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
1160
1120 /* Configure to enable SDIO mode. 1161 /* Configure to enable SDIO mode.
1121 * it's a must, otherwise sdio cmd5 fails 1162 * it's a must, otherwise sdio cmd5 fails
1122 */ 1163 */
@@ -1148,11 +1189,14 @@ static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
1148 struct mt_bdma_desc *bd = dma->bd; 1189 struct mt_bdma_desc *bd = dma->bd;
1149 int i; 1190 int i;
1150 1191
1151 memset(gpd, 0, sizeof(struct mt_gpdma_desc)); 1192 memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);
1152 1193
1153 gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */ 1194 gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
1154 gpd->ptr = (u32)dma->bd_addr; /* physical address */ 1195 gpd->ptr = (u32)dma->bd_addr; /* physical address */
1155 1196 /* gpd->next must be set for descriptor DMA;
 1197 * that's why two gpd structures must be allocated.
1198 */
1199 gpd->next = (u32)dma->gpd_addr + sizeof(struct mt_gpdma_desc);
1156 memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM); 1200 memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
1157 for (i = 0; i < (MAX_BD_NUM - 1); i++) 1201 for (i = 0; i < (MAX_BD_NUM - 1); i++)
1158 bd[i].next = (u32)dma->bd_addr + sizeof(*bd) * (i + 1); 1202 bd[i].next = (u32)dma->bd_addr + sizeof(*bd) * (i + 1);
@@ -1162,20 +1206,16 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1162{ 1206{
1163 struct msdc_host *host = mmc_priv(mmc); 1207 struct msdc_host *host = mmc_priv(mmc);
1164 int ret; 1208 int ret;
1165 u32 ddr = 0;
1166 1209
1167 pm_runtime_get_sync(host->dev); 1210 pm_runtime_get_sync(host->dev);
1168 1211
1169 if (ios->timing == MMC_TIMING_UHS_DDR50 ||
1170 ios->timing == MMC_TIMING_MMC_DDR52)
1171 ddr = 1;
1172
1173 msdc_set_buswidth(host, ios->bus_width); 1212 msdc_set_buswidth(host, ios->bus_width);
1174 1213
1175 /* Suspend/Resume will do power off/on */ 1214 /* Suspend/Resume will do power off/on */
1176 switch (ios->power_mode) { 1215 switch (ios->power_mode) {
1177 case MMC_POWER_UP: 1216 case MMC_POWER_UP:
1178 if (!IS_ERR(mmc->supply.vmmc)) { 1217 if (!IS_ERR(mmc->supply.vmmc)) {
1218 msdc_init_hw(host);
1179 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 1219 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1180 ios->vdd); 1220 ios->vdd);
1181 if (ret) { 1221 if (ret) {
@@ -1206,14 +1246,207 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1206 break; 1246 break;
1207 } 1247 }
1208 1248
1209 if (host->mclk != ios->clock || host->ddr != ddr) 1249 if (host->mclk != ios->clock || host->timing != ios->timing)
1210 msdc_set_mclk(host, ddr, ios->clock); 1250 msdc_set_mclk(host, ios->timing, ios->clock);
1211 1251
1212end: 1252end:
1213 pm_runtime_mark_last_busy(host->dev); 1253 pm_runtime_mark_last_busy(host->dev);
1214 pm_runtime_put_autosuspend(host->dev); 1254 pm_runtime_put_autosuspend(host->dev);
1215} 1255}
1216 1256
1257static u32 test_delay_bit(u32 delay, u32 bit)
1258{
1259 bit %= PAD_DELAY_MAX;
1260 return delay & (1 << bit);
1261}
1262
1263static int get_delay_len(u32 delay, u32 start_bit)
1264{
1265 int i;
1266
1267 for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
1268 if (test_delay_bit(delay, start_bit + i) == 0)
1269 return i;
1270 }
1271 return PAD_DELAY_MAX - start_bit;
1272}
1273
1274static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
1275{
1276 int start = 0, len = 0;
1277 int start_final = 0, len_final = 0;
1278 u8 final_phase = 0xff;
1279 struct msdc_delay_phase delay_phase;
1280
1281 if (delay == 0) {
1282 dev_err(host->dev, "phase error: [map:%x]\n", delay);
1283 delay_phase.final_phase = final_phase;
1284 return delay_phase;
1285 }
1286
1287 while (start < PAD_DELAY_MAX) {
1288 len = get_delay_len(delay, start);
1289 if (len_final < len) {
1290 start_final = start;
1291 len_final = len;
1292 }
1293 start += len ? len : 1;
1294 if (len >= 8 && start_final < 4)
1295 break;
1296 }
1297
 1298 /* The rule is to find the smallest delay cell */
1299 if (start_final == 0)
1300 final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
1301 else
1302 final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
1303 dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
1304 delay, len_final, final_phase);
1305
1306 delay_phase.maxlen = len_final;
1307 delay_phase.start = start_final;
1308 delay_phase.final_phase = final_phase;
1309 return delay_phase;
1310}
1311
1312static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
1313{
1314 struct msdc_host *host = mmc_priv(mmc);
1315 u32 rise_delay = 0, fall_delay = 0;
1316 struct msdc_delay_phase final_rise_delay, final_fall_delay;
1317 u8 final_delay, final_maxlen;
1318 int cmd_err;
1319 int i;
1320
1321 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1322 for (i = 0 ; i < PAD_DELAY_MAX; i++) {
1323 sdr_set_field(host->base + MSDC_PAD_TUNE,
1324 MSDC_PAD_TUNE_CMDRDLY, i);
1325 mmc_send_tuning(mmc, opcode, &cmd_err);
1326 if (!cmd_err)
1327 rise_delay |= (1 << i);
1328 }
1329
1330 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1331 for (i = 0; i < PAD_DELAY_MAX; i++) {
1332 sdr_set_field(host->base + MSDC_PAD_TUNE,
1333 MSDC_PAD_TUNE_CMDRDLY, i);
1334 mmc_send_tuning(mmc, opcode, &cmd_err);
1335 if (!cmd_err)
1336 fall_delay |= (1 << i);
1337 }
1338
1339 final_rise_delay = get_best_delay(host, rise_delay);
1340 final_fall_delay = get_best_delay(host, fall_delay);
1341
1342 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
1343 if (final_maxlen == final_rise_delay.maxlen) {
1344 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1345 sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
1346 final_rise_delay.final_phase);
1347 final_delay = final_rise_delay.final_phase;
1348 } else {
1349 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
1350 sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
1351 final_fall_delay.final_phase);
1352 final_delay = final_fall_delay.final_phase;
1353 }
1354
1355 return final_delay == 0xff ? -EIO : 0;
1356}
1357
1358static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
1359{
1360 struct msdc_host *host = mmc_priv(mmc);
1361 u32 rise_delay = 0, fall_delay = 0;
1362 struct msdc_delay_phase final_rise_delay, final_fall_delay;
1363 u8 final_delay, final_maxlen;
1364 int i, ret;
1365
1366 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
1367 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
1368 for (i = 0 ; i < PAD_DELAY_MAX; i++) {
1369 sdr_set_field(host->base + MSDC_PAD_TUNE,
1370 MSDC_PAD_TUNE_DATRRDLY, i);
1371 ret = mmc_send_tuning(mmc, opcode, NULL);
1372 if (!ret)
1373 rise_delay |= (1 << i);
1374 }
1375
1376 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
1377 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
1378 for (i = 0; i < PAD_DELAY_MAX; i++) {
1379 sdr_set_field(host->base + MSDC_PAD_TUNE,
1380 MSDC_PAD_TUNE_DATRRDLY, i);
1381 ret = mmc_send_tuning(mmc, opcode, NULL);
1382 if (!ret)
1383 fall_delay |= (1 << i);
1384 }
1385
1386 final_rise_delay = get_best_delay(host, rise_delay);
1387 final_fall_delay = get_best_delay(host, fall_delay);
1388
1389 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
1390 /* Rising edge is more stable, prefer to use it */
1391 if (final_rise_delay.maxlen >= 10)
1392 final_maxlen = final_rise_delay.maxlen;
1393 if (final_maxlen == final_rise_delay.maxlen) {
1394 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
1395 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
1396 sdr_set_field(host->base + MSDC_PAD_TUNE,
1397 MSDC_PAD_TUNE_DATRRDLY,
1398 final_rise_delay.final_phase);
1399 final_delay = final_rise_delay.final_phase;
1400 } else {
1401 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
1402 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
1403 sdr_set_field(host->base + MSDC_PAD_TUNE,
1404 MSDC_PAD_TUNE_DATRRDLY,
1405 final_fall_delay.final_phase);
1406 final_delay = final_fall_delay.final_phase;
1407 }
1408
1409 return final_delay == 0xff ? -EIO : 0;
1410}
1411
1412static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1413{
1414 struct msdc_host *host = mmc_priv(mmc);
1415 int ret;
1416
1417 pm_runtime_get_sync(host->dev);
1418 ret = msdc_tune_response(mmc, opcode);
1419 if (ret == -EIO) {
1420 dev_err(host->dev, "Tune response fail!\n");
1421 goto out;
1422 }
1423 ret = msdc_tune_data(mmc, opcode);
1424 if (ret == -EIO)
1425 dev_err(host->dev, "Tune data fail!\n");
1426
1427out:
1428 pm_runtime_mark_last_busy(host->dev);
1429 pm_runtime_put_autosuspend(host->dev);
1430 return ret;
1431}
1432
1433static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1434{
1435 struct msdc_host *host = mmc_priv(mmc);
1436
1437 writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
1438 return 0;
1439}
1440
1441static void msdc_hw_reset(struct mmc_host *mmc)
1442{
1443 struct msdc_host *host = mmc_priv(mmc);
1444
1445 sdr_set_bits(host->base + EMMC_IOCON, 1);
1446 udelay(10); /* 10us is enough */
1447 sdr_clr_bits(host->base + EMMC_IOCON, 1);
1448}
1449
1217static struct mmc_host_ops mt_msdc_ops = { 1450static struct mmc_host_ops mt_msdc_ops = {
1218 .post_req = msdc_post_req, 1451 .post_req = msdc_post_req,
1219 .pre_req = msdc_pre_req, 1452 .pre_req = msdc_pre_req,
@@ -1221,6 +1454,9 @@ static struct mmc_host_ops mt_msdc_ops = {
1221 .set_ios = msdc_ops_set_ios, 1454 .set_ios = msdc_ops_set_ios,
1222 .start_signal_voltage_switch = msdc_ops_switch_volt, 1455 .start_signal_voltage_switch = msdc_ops_switch_volt,
1223 .card_busy = msdc_card_busy, 1456 .card_busy = msdc_card_busy,
1457 .execute_tuning = msdc_execute_tuning,
1458 .prepare_hs400_tuning = msdc_prepare_hs400_tuning,
1459 .hw_reset = msdc_hw_reset,
1224}; 1460};
1225 1461
1226static int msdc_drv_probe(struct platform_device *pdev) 1462static int msdc_drv_probe(struct platform_device *pdev)
@@ -1294,6 +1530,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
1294 goto host_free; 1530 goto host_free;
1295 } 1531 }
1296 1532
1533 if (!of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
1534 &host->hs400_ds_delay))
1535 dev_dbg(&pdev->dev, "hs400-ds-delay: %x\n",
1536 host->hs400_ds_delay);
1537
1297 host->dev = &pdev->dev; 1538 host->dev = &pdev->dev;
1298 host->mmc = mmc; 1539 host->mmc = mmc;
1299 host->src_clk_freq = clk_get_rate(host->src_clk); 1540 host->src_clk_freq = clk_get_rate(host->src_clk);
@@ -1302,6 +1543,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
1302 mmc->f_min = host->src_clk_freq / (4 * 255); 1543 mmc->f_min = host->src_clk_freq / (4 * 255);
1303 1544
1304 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; 1545 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
1546 mmc->caps |= MMC_CAP_RUNTIME_RESUME;
1305 /* MMC core transfer sizes tunable parameters */ 1547 /* MMC core transfer sizes tunable parameters */
1306 mmc->max_segs = MAX_BD_NUM; 1548 mmc->max_segs = MAX_BD_NUM;
1307 mmc->max_seg_size = BDMA_DESC_BUFLEN; 1549 mmc->max_seg_size = BDMA_DESC_BUFLEN;
@@ -1313,7 +1555,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
1313 1555
1314 host->timeout_clks = 3 * 1048576; 1556 host->timeout_clks = 3 * 1048576;
1315 host->dma.gpd = dma_alloc_coherent(&pdev->dev, 1557 host->dma.gpd = dma_alloc_coherent(&pdev->dev,
1316 sizeof(struct mt_gpdma_desc), 1558 2 * sizeof(struct mt_gpdma_desc),
1317 &host->dma.gpd_addr, GFP_KERNEL); 1559 &host->dma.gpd_addr, GFP_KERNEL);
1318 host->dma.bd = dma_alloc_coherent(&pdev->dev, 1560 host->dma.bd = dma_alloc_coherent(&pdev->dev,
1319 MAX_BD_NUM * sizeof(struct mt_bdma_desc), 1561 MAX_BD_NUM * sizeof(struct mt_bdma_desc),
@@ -1354,7 +1596,7 @@ release:
1354release_mem: 1596release_mem:
1355 if (host->dma.gpd) 1597 if (host->dma.gpd)
1356 dma_free_coherent(&pdev->dev, 1598 dma_free_coherent(&pdev->dev,
1357 sizeof(struct mt_gpdma_desc), 1599 2 * sizeof(struct mt_gpdma_desc),
1358 host->dma.gpd, host->dma.gpd_addr); 1600 host->dma.gpd, host->dma.gpd_addr);
1359 if (host->dma.bd) 1601 if (host->dma.bd)
1360 dma_free_coherent(&pdev->dev, 1602 dma_free_coherent(&pdev->dev,
@@ -1403,6 +1645,8 @@ static void msdc_save_reg(struct msdc_host *host)
1403 host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); 1645 host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
1404 host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); 1646 host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
1405 host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); 1647 host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
1648 host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
1649 host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
1406} 1650}
1407 1651
1408static void msdc_restore_reg(struct msdc_host *host) 1652static void msdc_restore_reg(struct msdc_host *host)
@@ -1413,6 +1657,8 @@ static void msdc_restore_reg(struct msdc_host *host)
1413 writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE); 1657 writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
1414 writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); 1658 writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
1415 writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); 1659 writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
1660 writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
1661 writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
1416} 1662}
1417 1663
1418static int msdc_runtime_suspend(struct device *dev) 1664static int msdc_runtime_suspend(struct device *dev)
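
The mtk-sd tuning code above records, for each PAD delay setting, whether mmc_send_tuning() passed, and then get_best_delay() picks a phase inside the longest run of passing settings. The standalone sketch below illustrates that longest-run search on a 32-bit pass bitmap; it is not part of the patch, the sample bitmap is made up, and it omits the driver's special cases (start==0 uses len/3, early exit once a run of 8 is found near the start).

/*
 * Illustrative sketch of the delay-window search behind get_best_delay():
 * each set bit marks a PAD delay value that passed tuning; choose the
 * centre of the longest run of set bits. Sample bitmap is hypothetical.
 */
#include <stdio.h>

#define PAD_DELAY_MAX 32

int main(void)
{
	unsigned int delay = 0x00ffe0f0;	/* hypothetical pass bitmap */
	int start = 0, start_final = 0, len_final = 0;

	while (start < PAD_DELAY_MAX) {
		int len = 0;

		/* length of the run of set bits starting at 'start' */
		while (start + len < PAD_DELAY_MAX &&
		       (delay & (1u << (start + len))))
			len++;
		if (len > len_final) {
			start_final = start;
			len_final = len;
		}
		start += len ? len : 1;
	}

	if (!len_final) {
		printf("no working delay setting\n");
		return 1;
	}
	printf("best window: start %d, length %d, final phase %d\n",
	       start_final, len_final,
	       (start_final + len_final / 2) % PAD_DELAY_MAX);
	return 0;
}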
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b763b11ed9e1..b9958a123594 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1490,6 +1490,7 @@ static const struct of_device_id mmc_omap_match[] = {
1490 { .compatible = "ti,omap2420-mmc", }, 1490 { .compatible = "ti,omap2420-mmc", },
1491 { }, 1491 { },
1492}; 1492};
1493MODULE_DEVICE_TABLE(of, mmc_omap_match);
1493#endif 1494#endif
1494 1495
1495static struct platform_driver mmc_omap_driver = { 1496static struct platform_driver mmc_omap_driver = {
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 22d929fa3371..f6047fc94062 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -207,7 +207,9 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
207 .caps2 = MMC_CAP2_HC_ERASE_SZ, 207 .caps2 = MMC_CAP2_HC_ERASE_SZ,
208 .flags = SDHCI_ACPI_RUNTIME_PM, 208 .flags = SDHCI_ACPI_RUNTIME_PM,
209 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 209 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
210 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC, 210 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
211 SDHCI_QUIRK2_STOP_WITH_TC |
212 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
211 .probe_slot = sdhci_acpi_emmc_probe_slot, 213 .probe_slot = sdhci_acpi_emmc_probe_slot,
212}; 214};
213 215
@@ -239,6 +241,9 @@ struct sdhci_acpi_uid_slot {
239}; 241};
240 242
241static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { 243static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
244 { "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
245 { "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
246 { "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
242 { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc }, 247 { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
243 { "80860F14" , "3" , &sdhci_acpi_slot_int_sd }, 248 { "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
244 { "80860F16" , NULL, &sdhci_acpi_slot_int_sd }, 249 { "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
@@ -247,11 +252,15 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
247 { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, 252 { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
248 { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, 253 { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
249 { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio }, 254 { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
255 { "PNP0FFF" , "3" , &sdhci_acpi_slot_int_sd },
250 { "PNP0D40" }, 256 { "PNP0D40" },
251 { }, 257 { },
252}; 258};
253 259
254static const struct acpi_device_id sdhci_acpi_ids[] = { 260static const struct acpi_device_id sdhci_acpi_ids[] = {
261 { "80865ACA" },
262 { "80865ACC" },
263 { "80865AD0" },
255 { "80860F14" }, 264 { "80860F14" },
256 { "80860F16" }, 265 { "80860F16" },
257 { "INT33BB" }, 266 { "INT33BB" },
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 2bd90fb35c75..00a8a40a3729 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -273,7 +273,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
273 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 273 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
274 274
275 dev_dbg(dev, "is_8bit=%c\n", 275 dev_dbg(dev, "is_8bit=%c\n",
276 (host->mmc->caps | MMC_CAP_8_BIT_DATA) ? 'Y' : 'N'); 276 (host->mmc->caps & MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
277 277
278 ret = sdhci_bcm_kona_sd_reset(host); 278 ret = sdhci_bcm_kona_sd_reset(host);
279 if (ret) 279 if (ret)
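The sdhci-bcm-kona hunk is a one-character fix worth spelling out: OR-ing MMC_CAP_8_BIT_DATA into the capability mask always yields a non-zero value, so the debug line reported 8-bit support unconditionally; the AND actually tests the bit. A stand-alone illustration of the difference (the flag value below is a placeholder, not the real MMC_CAP_8_BIT_DATA):

#include <stdio.h>

#define CAP_8_BIT_DATA 0x0004   /* placeholder bit, not the real MMC capability value */

int main(void)
{
        unsigned int caps = 0;  /* a host that does NOT support 8-bit data */

        /* buggy test: always true, because the OR injects the bit */
        printf("with |: %c\n", (caps | CAP_8_BIT_DATA) ? 'Y' : 'N');

        /* fixed test: true only if the bit is already set in caps */
        printf("with &: %c\n", (caps & CAP_8_BIT_DATA) ? 'Y' : 'N');
        return 0;
}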
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 886d230f41d0..1f1582f6cccb 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -759,7 +759,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
759 min = ESDHC_TUNE_CTRL_MIN; 759 min = ESDHC_TUNE_CTRL_MIN;
760 while (min < ESDHC_TUNE_CTRL_MAX) { 760 while (min < ESDHC_TUNE_CTRL_MAX) {
761 esdhc_prepare_tuning(host, min); 761 esdhc_prepare_tuning(host, min);
762 if (!mmc_send_tuning(host->mmc)) 762 if (!mmc_send_tuning(host->mmc, opcode, NULL))
763 break; 763 break;
764 min += ESDHC_TUNE_CTRL_STEP; 764 min += ESDHC_TUNE_CTRL_STEP;
765 } 765 }
@@ -768,7 +768,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
768 max = min + ESDHC_TUNE_CTRL_STEP; 768 max = min + ESDHC_TUNE_CTRL_STEP;
769 while (max < ESDHC_TUNE_CTRL_MAX) { 769 while (max < ESDHC_TUNE_CTRL_MAX) {
770 esdhc_prepare_tuning(host, max); 770 esdhc_prepare_tuning(host, max);
771 if (mmc_send_tuning(host->mmc)) { 771 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
772 max -= ESDHC_TUNE_CTRL_STEP; 772 max -= ESDHC_TUNE_CTRL_STEP;
773 break; 773 break;
774 } 774 }
@@ -778,7 +778,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
778 /* use average delay to get the best timing */ 778 /* use average delay to get the best timing */
779 avg = (min + max) / 2; 779 avg = (min + max) / 2;
780 esdhc_prepare_tuning(host, avg); 780 esdhc_prepare_tuning(host, avg);
781 ret = mmc_send_tuning(host->mmc); 781 ret = mmc_send_tuning(host->mmc, opcode, NULL);
782 esdhc_post_tuning(host); 782 esdhc_post_tuning(host);
783 783
784 dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", 784 dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
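The three sdhci-esdhc-imx hunks track an MMC core interface change: mmc_send_tuning() now takes the tuning opcode plus an extra third argument (NULL at every call site here) instead of working the opcode out internally. The surrounding logic is a window scan: raise the delay until tuning first passes (min), keep raising it until it fails again (max), then program the midpoint. A user-space toy of that scan, with the tuning command stubbed out to succeed inside an invented window:

#include <stdio.h>

#define TUNE_MIN   0
#define TUNE_MAX   128
#define TUNE_STEP  1

/* stand-in for mmc_send_tuning(): 0 on success, -1 on failure.
 * Pretend the sampling eye is open for delays 40..90. */
static int send_tuning(int delay)
{
        return (delay >= 40 && delay <= 90) ? 0 : -1;
}

int main(void)
{
        int min = TUNE_MIN, max, avg;

        while (min < TUNE_MAX && send_tuning(min) != 0)
                min += TUNE_STEP;          /* first passing delay */

        max = min + TUNE_STEP;
        while (max < TUNE_MAX && send_tuning(max) == 0)
                max += TUNE_STEP;          /* walk until it fails again */
        max -= TUNE_STEP;                  /* last passing delay */

        avg = (min + max) / 2;             /* centre of the passing window */
        printf("window [%d..%d], using %d -> %s\n",
               min, max, avg, send_tuning(avg) ? "fail" : "ok");
        return 0;
}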
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 163ac9974d91..de132e281753 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -24,6 +24,8 @@
24 SDHCI_QUIRK_PIO_NEEDS_DELAY | \ 24 SDHCI_QUIRK_PIO_NEEDS_DELAY | \
25 SDHCI_QUIRK_NO_HISPD_BIT) 25 SDHCI_QUIRK_NO_HISPD_BIT)
26 26
27#define ESDHC_PROCTL 0x28
28
27#define ESDHC_SYSTEM_CONTROL 0x2c 29#define ESDHC_SYSTEM_CONTROL 0x2c
28#define ESDHC_CLOCK_MASK 0x0000fff0 30#define ESDHC_CLOCK_MASK 0x0000fff0
29#define ESDHC_PREDIV_SHIFT 8 31#define ESDHC_PREDIV_SHIFT 8
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4bcee033feda..4695bee203ea 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -373,7 +373,7 @@ retry:
373 if (rc) 373 if (rc)
374 return rc; 374 return rc;
375 375
376 rc = mmc_send_tuning(mmc); 376 rc = mmc_send_tuning(mmc, opcode, NULL);
377 if (!rc) { 377 if (!rc) {
378 /* Tuning is successful at this tuning point */ 378 /* Tuning is successful at this tuning point */
379 tuned_phases[tuned_phase_cnt++] = phase; 379 tuned_phases[tuned_phase_cnt++] = phase;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index a0f05de5409f..06d0b50dfe71 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -111,7 +111,6 @@ static int sdhci_at91_probe(struct platform_device *pdev)
111 if (ret < 0) { 111 if (ret < 0) {
112 dev_err(&pdev->dev, "failed to set gck"); 112 dev_err(&pdev->dev, "failed to set gck");
113 goto hclock_disable_unprepare; 113 goto hclock_disable_unprepare;
114 return -EINVAL;
115 } 114 }
116 /* 115 /*
117 * We need to check if we have the requested rate for gck because in 116 * We need to check if we have the requested rate for gck because in
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 653f335bef15..90e94a028a49 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -24,122 +24,324 @@
24 24
25#define VENDOR_V_22 0x12 25#define VENDOR_V_22 0x12
26#define VENDOR_V_23 0x13 26#define VENDOR_V_23 0x13
27static u32 esdhc_readl(struct sdhci_host *host, int reg) 27
28struct sdhci_esdhc {
29 u8 vendor_ver;
30 u8 spec_ver;
31};
32
33/**
34 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
35 * to make it compatible with SD spec.
36 *
37 * @host: pointer to sdhci_host
38 * @spec_reg: SD spec register address
39 * @value: 32bit eSDHC register value on spec_reg address
40 *
41 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
42 * registers are 32 bits. There are differences in register size, register
43 * address, register function, bit position and function between eSDHC spec
44 * and SD spec.
45 *
46 * Return a fixed up register value
47 */
48static u32 esdhc_readl_fixup(struct sdhci_host *host,
49 int spec_reg, u32 value)
28{ 50{
51 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
52 struct sdhci_esdhc *esdhc = pltfm_host->priv;
29 u32 ret; 53 u32 ret;
30 54
31 ret = in_be32(host->ioaddr + reg);
32 /* 55 /*
33 * The bit of ADMA flag in eSDHC is not compatible with standard 56 * The bit of ADMA flag in eSDHC is not compatible with standard
34 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is 57 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
35 * supported by eSDHC. 58 * supported by eSDHC.
36 * And for many FSL eSDHC controller, the reset value of field 59 * And for many FSL eSDHC controller, the reset value of field
37 * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA, 60 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
38 * only these vendor version is greater than 2.2/0x12 support ADMA. 61 * only these vendor version is greater than 2.2/0x12 support ADMA.
39 * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the
40 * the verdor version number, oxFE is SDHCI_HOST_VERSION.
41 */ 62 */
42 if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { 63 if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
43 u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); 64 if (esdhc->vendor_ver > VENDOR_V_22) {
44 tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; 65 ret = value | SDHCI_CAN_DO_ADMA2;
45 if (tmp > VENDOR_V_22) 66 return ret;
46 ret |= SDHCI_CAN_DO_ADMA2; 67 }
47 } 68 }
48 69 ret = value;
49 return ret; 70 return ret;
50} 71}
51 72
52static u16 esdhc_readw(struct sdhci_host *host, int reg) 73static u16 esdhc_readw_fixup(struct sdhci_host *host,
74 int spec_reg, u32 value)
53{ 75{
54 u16 ret; 76 u16 ret;
55 int base = reg & ~0x3; 77 int shift = (spec_reg & 0x2) * 8;
56 int shift = (reg & 0x2) * 8;
57 78
58 if (unlikely(reg == SDHCI_HOST_VERSION)) 79 if (spec_reg == SDHCI_HOST_VERSION)
59 ret = in_be32(host->ioaddr + base) & 0xffff; 80 ret = value & 0xffff;
60 else 81 else
61 ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff; 82 ret = (value >> shift) & 0xffff;
62 return ret; 83 return ret;
63} 84}
64 85
65static u8 esdhc_readb(struct sdhci_host *host, int reg) 86static u8 esdhc_readb_fixup(struct sdhci_host *host,
87 int spec_reg, u32 value)
66{ 88{
67 int base = reg & ~0x3; 89 u8 ret;
68 int shift = (reg & 0x3) * 8; 90 u8 dma_bits;
69 u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; 91 int shift = (spec_reg & 0x3) * 8;
92
93 ret = (value >> shift) & 0xff;
70 94
71 /* 95 /*
72 * "DMA select" locates at offset 0x28 in SD specification, but on 96 * "DMA select" locates at offset 0x28 in SD specification, but on
73 * P5020 or P3041, it locates at 0x29. 97 * P5020 or P3041, it locates at 0x29.
74 */ 98 */
75 if (reg == SDHCI_HOST_CONTROL) { 99 if (spec_reg == SDHCI_HOST_CONTROL) {
76 u32 dma_bits;
77
78 dma_bits = in_be32(host->ioaddr + reg);
79 /* DMA select is 22,23 bits in Protocol Control Register */ 100 /* DMA select is 22,23 bits in Protocol Control Register */
80 dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; 101 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
81
82 /* fixup the result */ 102 /* fixup the result */
83 ret &= ~SDHCI_CTRL_DMA_MASK; 103 ret &= ~SDHCI_CTRL_DMA_MASK;
84 ret |= dma_bits; 104 ret |= dma_bits;
85 } 105 }
86
87 return ret; 106 return ret;
88} 107}
89 108
90static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) 109/**
110 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
111 * written into eSDHC register.
112 *
113 * @host: pointer to sdhci_host
114 * @spec_reg: SD spec register address
115 * @value: 8/16/32bit SD spec register value that would be written
116 * @old_value: 32bit eSDHC register value on spec_reg address
117 *
118 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
119 * registers are 32 bits. There are differences in register size, register
120 * address, register function, bit position and function between eSDHC spec
121 * and SD spec.
122 *
123 * Return a fixed up register value
124 */
125static u32 esdhc_writel_fixup(struct sdhci_host *host,
126 int spec_reg, u32 value, u32 old_value)
91{ 127{
128 u32 ret;
129
92 /* 130 /*
93 * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] 131 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
94 * when SYSCTL[RSTD]) is set for some special operations. 132 * when SYSCTL[RSTD] is set for some special operations.
95 * No any impact other operation. 133 * No any impact on other operation.
96 */ 134 */
97 if (reg == SDHCI_INT_ENABLE) 135 if (spec_reg == SDHCI_INT_ENABLE)
98 val |= SDHCI_INT_BLK_GAP; 136 ret = value | SDHCI_INT_BLK_GAP;
99 sdhci_be32bs_writel(host, val, reg); 137 else
138 ret = value;
139
140 return ret;
100} 141}
101 142
102static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) 143static u32 esdhc_writew_fixup(struct sdhci_host *host,
144 int spec_reg, u16 value, u32 old_value)
103{ 145{
104 if (reg == SDHCI_BLOCK_SIZE) { 146 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
147 int shift = (spec_reg & 0x2) * 8;
148 u32 ret;
149
150 switch (spec_reg) {
151 case SDHCI_TRANSFER_MODE:
152 /*
153 * Postpone this write, we must do it together with a
154 * command write that is down below. Return old value.
155 */
156 pltfm_host->xfer_mode_shadow = value;
157 return old_value;
158 case SDHCI_COMMAND:
159 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
160 return ret;
161 }
162
163 ret = old_value & (~(0xffff << shift));
164 ret |= (value << shift);
165
166 if (spec_reg == SDHCI_BLOCK_SIZE) {
105 /* 167 /*
106 * Two last DMA bits are reserved, and first one is used for 168 * Two last DMA bits are reserved, and first one is used for
107 * non-standard blksz of 4096 bytes that we don't support 169 * non-standard blksz of 4096 bytes that we don't support
108 * yet. So clear the DMA boundary bits. 170 * yet. So clear the DMA boundary bits.
109 */ 171 */
110 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); 172 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
111 } 173 }
112 sdhci_be32bs_writew(host, val, reg); 174 return ret;
113} 175}
114 176
115static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) 177static u32 esdhc_writeb_fixup(struct sdhci_host *host,
178 int spec_reg, u8 value, u32 old_value)
116{ 179{
180 u32 ret;
181 u32 dma_bits;
182 u8 tmp;
183 int shift = (spec_reg & 0x3) * 8;
184
185 /*
186 * eSDHC doesn't have a standard power control register, so we do
187 * nothing here to avoid incorrect operation.
188 */
189 if (spec_reg == SDHCI_POWER_CONTROL)
190 return old_value;
117 /* 191 /*
118 * "DMA select" location is offset 0x28 in SD specification, but on 192 * "DMA select" location is offset 0x28 in SD specification, but on
119 * P5020 or P3041, it's located at 0x29. 193 * P5020 or P3041, it's located at 0x29.
120 */ 194 */
121 if (reg == SDHCI_HOST_CONTROL) { 195 if (spec_reg == SDHCI_HOST_CONTROL) {
122 u32 dma_bits;
123
124 /* 196 /*
125 * If host control register is not standard, exit 197 * If host control register is not standard, exit
126 * this function 198 * this function
127 */ 199 */
128 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) 200 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
129 return; 201 return old_value;
130 202
131 /* DMA select is 22,23 bits in Protocol Control Register */ 203 /* DMA select is 22,23 bits in Protocol Control Register */
132 dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; 204 dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
133 clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, 205 ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
134 dma_bits); 206 tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
135 val &= ~SDHCI_CTRL_DMA_MASK; 207 (old_value & SDHCI_CTRL_DMA_MASK);
136 val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; 208 ret = (ret & (~0xff)) | tmp;
209
210 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
211 ret &= ~ESDHC_HOST_CONTROL_RES;
212 return ret;
137 } 213 }
138 214
139 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ 215 ret = (old_value & (~(0xff << shift))) | (value << shift);
140 if (reg == SDHCI_HOST_CONTROL) 216 return ret;
141 val &= ~ESDHC_HOST_CONTROL_RES; 217}
142 sdhci_be32bs_writeb(host, val, reg); 218
219static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
220{
221 u32 ret;
222 u32 value;
223
224 value = ioread32be(host->ioaddr + reg);
225 ret = esdhc_readl_fixup(host, reg, value);
226
227 return ret;
228}
229
230static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
231{
232 u32 ret;
233 u32 value;
234
235 value = ioread32(host->ioaddr + reg);
236 ret = esdhc_readl_fixup(host, reg, value);
237
238 return ret;
239}
240
241static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
242{
243 u16 ret;
244 u32 value;
245 int base = reg & ~0x3;
246
247 value = ioread32be(host->ioaddr + base);
248 ret = esdhc_readw_fixup(host, reg, value);
249 return ret;
250}
251
252static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
253{
254 u16 ret;
255 u32 value;
256 int base = reg & ~0x3;
257
258 value = ioread32(host->ioaddr + base);
259 ret = esdhc_readw_fixup(host, reg, value);
260 return ret;
261}
262
263static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
264{
265 u8 ret;
266 u32 value;
267 int base = reg & ~0x3;
268
269 value = ioread32be(host->ioaddr + base);
270 ret = esdhc_readb_fixup(host, reg, value);
271 return ret;
272}
273
274static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
275{
276 u8 ret;
277 u32 value;
278 int base = reg & ~0x3;
279
280 value = ioread32(host->ioaddr + base);
281 ret = esdhc_readb_fixup(host, reg, value);
282 return ret;
283}
284
285static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
286{
287 u32 value;
288
289 value = esdhc_writel_fixup(host, reg, val, 0);
290 iowrite32be(value, host->ioaddr + reg);
291}
292
293static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
294{
295 u32 value;
296
297 value = esdhc_writel_fixup(host, reg, val, 0);
298 iowrite32(value, host->ioaddr + reg);
299}
300
301static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
302{
303 int base = reg & ~0x3;
304 u32 value;
305 u32 ret;
306
307 value = ioread32be(host->ioaddr + base);
308 ret = esdhc_writew_fixup(host, reg, val, value);
309 if (reg != SDHCI_TRANSFER_MODE)
310 iowrite32be(ret, host->ioaddr + base);
311}
312
313static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
314{
315 int base = reg & ~0x3;
316 u32 value;
317 u32 ret;
318
319 value = ioread32(host->ioaddr + base);
320 ret = esdhc_writew_fixup(host, reg, val, value);
321 if (reg != SDHCI_TRANSFER_MODE)
322 iowrite32(ret, host->ioaddr + base);
323}
324
325static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
326{
327 int base = reg & ~0x3;
328 u32 value;
329 u32 ret;
330
331 value = ioread32be(host->ioaddr + base);
332 ret = esdhc_writeb_fixup(host, reg, val, value);
333 iowrite32be(ret, host->ioaddr + base);
334}
335
336static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
337{
338 int base = reg & ~0x3;
339 u32 value;
340 u32 ret;
341
342 value = ioread32(host->ioaddr + base);
343 ret = esdhc_writeb_fixup(host, reg, val, value);
344 iowrite32(ret, host->ioaddr + base);
143} 345}
144 346
145/* 347/*
@@ -149,19 +351,17 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
149 * For Continue, apply soft reset for data(SYSCTL[RSTD]); 351 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
150 * and re-issue the entire read transaction from beginning. 352 * and re-issue the entire read transaction from beginning.
151 */ 353 */
152static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) 354static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
153{ 355{
154 u32 tmp; 356 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
357 struct sdhci_esdhc *esdhc = pltfm_host->priv;
155 bool applicable; 358 bool applicable;
156 dma_addr_t dmastart; 359 dma_addr_t dmastart;
157 dma_addr_t dmanow; 360 dma_addr_t dmanow;
158 361
159 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
160 tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
161
162 applicable = (intmask & SDHCI_INT_DATA_END) && 362 applicable = (intmask & SDHCI_INT_DATA_END) &&
163 (intmask & SDHCI_INT_BLK_GAP) && 363 (intmask & SDHCI_INT_BLK_GAP) &&
164 (tmp == VENDOR_V_23); 364 (esdhc->vendor_ver == VENDOR_V_23);
165 if (!applicable) 365 if (!applicable)
166 return; 366 return;
167 367
@@ -179,7 +379,11 @@ static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
179 379
180static int esdhc_of_enable_dma(struct sdhci_host *host) 380static int esdhc_of_enable_dma(struct sdhci_host *host)
181{ 381{
182 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); 382 u32 value;
383
384 value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
385 value |= ESDHC_DMA_SNOOP;
386 sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
183 return 0; 387 return 0;
184} 388}
185 389
@@ -199,6 +403,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
199 403
200static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) 404static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
201{ 405{
406 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
407 struct sdhci_esdhc *esdhc = pltfm_host->priv;
202 int pre_div = 1; 408 int pre_div = 1;
203 int div = 1; 409 int div = 1;
204 u32 temp; 410 u32 temp;
@@ -209,9 +415,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
209 return; 415 return;
210 416
211 /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ 417 /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
212 temp = esdhc_readw(host, SDHCI_HOST_VERSION); 418 if (esdhc->vendor_ver < VENDOR_V_23)
213 temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
214 if (temp < VENDOR_V_23)
215 pre_div = 2; 419 pre_div = 2;
216 420
217 /* Workaround to reduce the clock frequency for p1010 esdhc */ 421 /* Workaround to reduce the clock frequency for p1010 esdhc */
@@ -247,39 +451,26 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
247 mdelay(1); 451 mdelay(1);
248} 452}
249 453
250static void esdhc_of_platform_init(struct sdhci_host *host)
251{
252 u32 vvn;
253
254 vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
255 vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
256 if (vvn == VENDOR_V_22)
257 host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
258
259 if (vvn > VENDOR_V_22)
260 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
261}
262
263static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) 454static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
264{ 455{
265 u32 ctrl; 456 u32 ctrl;
266 457
458 ctrl = sdhci_readl(host, ESDHC_PROCTL);
459 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
267 switch (width) { 460 switch (width) {
268 case MMC_BUS_WIDTH_8: 461 case MMC_BUS_WIDTH_8:
269 ctrl = ESDHC_CTRL_8BITBUS; 462 ctrl |= ESDHC_CTRL_8BITBUS;
270 break; 463 break;
271 464
272 case MMC_BUS_WIDTH_4: 465 case MMC_BUS_WIDTH_4:
273 ctrl = ESDHC_CTRL_4BITBUS; 466 ctrl |= ESDHC_CTRL_4BITBUS;
274 break; 467 break;
275 468
276 default: 469 default:
277 ctrl = 0;
278 break; 470 break;
279 } 471 }
280 472
281 clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, 473 sdhci_writel(host, ctrl, ESDHC_PROCTL);
282 ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
283} 474}
284 475
285static void esdhc_reset(struct sdhci_host *host, u8 mask) 476static void esdhc_reset(struct sdhci_host *host, u8 mask)
@@ -290,32 +481,13 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
290 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 481 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
291} 482}
292 483
293static const struct sdhci_ops sdhci_esdhc_ops = {
294 .read_l = esdhc_readl,
295 .read_w = esdhc_readw,
296 .read_b = esdhc_readb,
297 .write_l = esdhc_writel,
298 .write_w = esdhc_writew,
299 .write_b = esdhc_writeb,
300 .set_clock = esdhc_of_set_clock,
301 .enable_dma = esdhc_of_enable_dma,
302 .get_max_clock = esdhc_of_get_max_clock,
303 .get_min_clock = esdhc_of_get_min_clock,
304 .platform_init = esdhc_of_platform_init,
305 .adma_workaround = esdhci_of_adma_workaround,
306 .set_bus_width = esdhc_pltfm_set_bus_width,
307 .reset = esdhc_reset,
308 .set_uhs_signaling = sdhci_set_uhs_signaling,
309};
310
311#ifdef CONFIG_PM 484#ifdef CONFIG_PM
312
313static u32 esdhc_proctl; 485static u32 esdhc_proctl;
314static int esdhc_of_suspend(struct device *dev) 486static int esdhc_of_suspend(struct device *dev)
315{ 487{
316 struct sdhci_host *host = dev_get_drvdata(dev); 488 struct sdhci_host *host = dev_get_drvdata(dev);
317 489
318 esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); 490 esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
319 491
320 return sdhci_suspend_host(host); 492 return sdhci_suspend_host(host);
321} 493}
@@ -328,9 +500,8 @@ static int esdhc_of_resume(struct device *dev)
328 if (ret == 0) { 500 if (ret == 0) {
329 /* Isn't this already done by sdhci_resume_host() ? --rmk */ 501 /* Isn't this already done by sdhci_resume_host() ? --rmk */
330 esdhc_of_enable_dma(host); 502 esdhc_of_enable_dma(host);
331 sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); 503 sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
332 } 504 }
333
334 return ret; 505 return ret;
335} 506}
336 507
@@ -343,37 +514,103 @@ static const struct dev_pm_ops esdhc_pmops = {
343#define ESDHC_PMOPS NULL 514#define ESDHC_PMOPS NULL
344#endif 515#endif
345 516
346static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { 517static const struct sdhci_ops sdhci_esdhc_be_ops = {
347 /* 518 .read_l = esdhc_be_readl,
348 * card detection could be handled via GPIO 519 .read_w = esdhc_be_readw,
349 * eSDHC cannot support End Attribute in NOP ADMA descriptor 520 .read_b = esdhc_be_readb,
350 */ 521 .write_l = esdhc_be_writel,
522 .write_w = esdhc_be_writew,
523 .write_b = esdhc_be_writeb,
524 .set_clock = esdhc_of_set_clock,
525 .enable_dma = esdhc_of_enable_dma,
526 .get_max_clock = esdhc_of_get_max_clock,
527 .get_min_clock = esdhc_of_get_min_clock,
528 .adma_workaround = esdhc_of_adma_workaround,
529 .set_bus_width = esdhc_pltfm_set_bus_width,
530 .reset = esdhc_reset,
531 .set_uhs_signaling = sdhci_set_uhs_signaling,
532};
533
534static const struct sdhci_ops sdhci_esdhc_le_ops = {
535 .read_l = esdhc_le_readl,
536 .read_w = esdhc_le_readw,
537 .read_b = esdhc_le_readb,
538 .write_l = esdhc_le_writel,
539 .write_w = esdhc_le_writew,
540 .write_b = esdhc_le_writeb,
541 .set_clock = esdhc_of_set_clock,
542 .enable_dma = esdhc_of_enable_dma,
543 .get_max_clock = esdhc_of_get_max_clock,
544 .get_min_clock = esdhc_of_get_min_clock,
545 .adma_workaround = esdhc_of_adma_workaround,
546 .set_bus_width = esdhc_pltfm_set_bus_width,
547 .reset = esdhc_reset,
548 .set_uhs_signaling = sdhci_set_uhs_signaling,
549};
550
551static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
351 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION 552 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
352 | SDHCI_QUIRK_NO_CARD_NO_RESET 553 | SDHCI_QUIRK_NO_CARD_NO_RESET
353 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 554 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
354 .ops = &sdhci_esdhc_ops, 555 .ops = &sdhci_esdhc_be_ops,
355}; 556};
356 557
558static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
559 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
560 | SDHCI_QUIRK_NO_CARD_NO_RESET
561 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
562 .ops = &sdhci_esdhc_le_ops,
563};
564
565static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
566{
567 struct sdhci_pltfm_host *pltfm_host;
568 struct sdhci_esdhc *esdhc;
569 u16 host_ver;
570
571 pltfm_host = sdhci_priv(host);
572 esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc),
573 GFP_KERNEL);
574
575 host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
576 esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
577 SDHCI_VENDOR_VER_SHIFT;
578 esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
579
580 pltfm_host->priv = esdhc;
581}
582
357static int sdhci_esdhc_probe(struct platform_device *pdev) 583static int sdhci_esdhc_probe(struct platform_device *pdev)
358{ 584{
359 struct sdhci_host *host; 585 struct sdhci_host *host;
360 struct device_node *np; 586 struct device_node *np;
361 int ret; 587 int ret;
362 588
363 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0); 589 np = pdev->dev.of_node;
590
591 if (of_get_property(np, "little-endian", NULL))
592 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0);
593 else
594 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0);
595
364 if (IS_ERR(host)) 596 if (IS_ERR(host))
365 return PTR_ERR(host); 597 return PTR_ERR(host);
366 598
599 esdhc_init(pdev, host);
600
367 sdhci_get_of_property(pdev); 601 sdhci_get_of_property(pdev);
368 602
369 np = pdev->dev.of_node;
370 if (of_device_is_compatible(np, "fsl,p5040-esdhc") || 603 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
371 of_device_is_compatible(np, "fsl,p5020-esdhc") || 604 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
372 of_device_is_compatible(np, "fsl,p4080-esdhc") || 605 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
373 of_device_is_compatible(np, "fsl,p1020-esdhc") || 606 of_device_is_compatible(np, "fsl,p1020-esdhc") ||
374 of_device_is_compatible(np, "fsl,t1040-esdhc")) 607 of_device_is_compatible(np, "fsl,t1040-esdhc") ||
608 of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
375 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 609 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
376 610
611 if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
612 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
613
377 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { 614 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
378 /* 615 /*
379 * Freescale messed up with P2020 as it has a non-standard 616 * Freescale messed up with P2020 as it has a non-standard
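The sdhci-of-esdhc rewrite above splits every register accessor into two layers: a thin endian-specific I/O helper (the esdhc_be_*/esdhc_le_* functions built on ioread32be()/ioread32()) and a shared *_fixup() routine that translates between the SD-spec register layout and the 32-bit-only eSDHC layout. That separation is what lets one driver serve both the big-endian controllers and the little-endian variant selected through the "little-endian" device-tree property. A compilable toy of the 16-bit read path, with an invented register file standing in for the hardware:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[4] = { 0, 0xabcd1234, 0, 0 };   /* invented register file */

/* endian-specific layer: stands in for ioread32()/ioread32be() */
static uint32_t raw_readl(int byte_off)
{
        return fake_regs[byte_off / 4];
}

/* shared fixup layer: pull the 16-bit SD-spec field out of the
 * 32-bit eSDHC word, in the same way as esdhc_readw_fixup() */
static uint16_t readw_fixup(int spec_reg, uint32_t value)
{
        int shift = (spec_reg & 0x2) * 8;
        return (value >> shift) & 0xffff;
}

static uint16_t esdhc_readw(int spec_reg)
{
        uint32_t value = raw_readl(spec_reg & ~0x3);
        return readw_fixup(spec_reg, value);
}

int main(void)
{
        printf("reg 0x04 -> 0x%04x, reg 0x06 -> 0x%04x\n",
               esdhc_readw(0x04), esdhc_readw(0x06));
        return 0;
}

The write path follows the same shape, except the fixup also receives the current 32-bit register value so the read-modify-write can be done in one place.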
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci-core.c
index b3b0a3e4fca1..cf7ad458b4f4 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -444,11 +444,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
444 else 444 else
445 scratch &= ~0x47; 445 scratch &= ~0x47;
446 446
447 ret = pci_write_config_byte(chip->pdev, 0xAE, scratch); 447 return pci_write_config_byte(chip->pdev, 0xAE, scratch);
448 if (ret)
449 return ret;
450
451 return 0;
452} 448}
453 449
454static int jmicron_probe(struct sdhci_pci_chip *chip) 450static int jmicron_probe(struct sdhci_pci_chip *chip)
@@ -1113,6 +1109,62 @@ static const struct pci_device_id pci_ids[] = {
1113 }, 1109 },
1114 1110
1115 { 1111 {
1112 .vendor = PCI_VENDOR_ID_INTEL,
1113 .device = PCI_DEVICE_ID_INTEL_DNV_EMMC,
1114 .subvendor = PCI_ANY_ID,
1115 .subdevice = PCI_ANY_ID,
1116 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
1117 },
1118
1119 {
1120 .vendor = PCI_VENDOR_ID_INTEL,
1121 .device = PCI_DEVICE_ID_INTEL_BXT_EMMC,
1122 .subvendor = PCI_ANY_ID,
1123 .subdevice = PCI_ANY_ID,
1124 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
1125 },
1126
1127 {
1128 .vendor = PCI_VENDOR_ID_INTEL,
1129 .device = PCI_DEVICE_ID_INTEL_BXT_SDIO,
1130 .subvendor = PCI_ANY_ID,
1131 .subdevice = PCI_ANY_ID,
1132 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
1133 },
1134
1135 {
1136 .vendor = PCI_VENDOR_ID_INTEL,
1137 .device = PCI_DEVICE_ID_INTEL_BXT_SD,
1138 .subvendor = PCI_ANY_ID,
1139 .subdevice = PCI_ANY_ID,
1140 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
1141 },
1142
1143 {
1144 .vendor = PCI_VENDOR_ID_INTEL,
1145 .device = PCI_DEVICE_ID_INTEL_APL_EMMC,
1146 .subvendor = PCI_ANY_ID,
1147 .subdevice = PCI_ANY_ID,
1148 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
1149 },
1150
1151 {
1152 .vendor = PCI_VENDOR_ID_INTEL,
1153 .device = PCI_DEVICE_ID_INTEL_APL_SDIO,
1154 .subvendor = PCI_ANY_ID,
1155 .subdevice = PCI_ANY_ID,
1156 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
1157 },
1158
1159 {
1160 .vendor = PCI_VENDOR_ID_INTEL,
1161 .device = PCI_DEVICE_ID_INTEL_APL_SD,
1162 .subvendor = PCI_ANY_ID,
1163 .subdevice = PCI_ANY_ID,
1164 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
1165 },
1166
1167 {
1116 .vendor = PCI_VENDOR_ID_O2, 1168 .vendor = PCI_VENDOR_ID_O2,
1117 .device = PCI_DEVICE_ID_O2_8120, 1169 .device = PCI_DEVICE_ID_O2_8120,
1118 .subvendor = PCI_ANY_ID, 1170 .subvendor = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index e2ec108dba0e..d48f03104b5b 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -60,7 +60,7 @@ static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
60 60
61} 61}
62 62
63void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip) 63static void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
64{ 64{
65 u32 scratch_32; 65 u32 scratch_32;
66 int ret; 66 int ret;
@@ -145,7 +145,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
145 scratch_32 |= 0x00080000; 145 scratch_32 |= 0x00080000;
146 pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32); 146 pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
147} 147}
148EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init);
149 148
150int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) 149int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
151{ 150{
@@ -179,7 +178,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
179 178
180 return 0; 179 return 0;
181} 180}
182EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
183 181
184int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) 182int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
185{ 183{
@@ -385,11 +383,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
385 383
386 return 0; 384 return 0;
387} 385}
388EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe);
389 386
390int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) 387int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
391{ 388{
392 sdhci_pci_o2_probe(chip); 389 sdhci_pci_o2_probe(chip);
393 return 0; 390 return 0;
394} 391}
395EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume);
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
index f7ffc908d9a0..770f53857211 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.h
+++ b/drivers/mmc/host/sdhci-pci-o2micro.h
@@ -64,8 +64,6 @@
64#define O2_SD_VENDOR_SETTING 0x110 64#define O2_SD_VENDOR_SETTING 0x110
65#define O2_SD_VENDOR_SETTING2 0x1C8 65#define O2_SD_VENDOR_SETTING2 0x1C8
66 66
67extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip);
68
69extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); 67extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
70 68
71extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); 69extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 541f1cad5247..d1a0b4db60db 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -24,6 +24,13 @@
24#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b 24#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
25#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c 25#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
26#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d 26#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
27#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
28#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
29#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
30#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
31#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
32#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
33#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
27 34
28/* 35/*
29 * PCI registers 36 * PCI registers
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index a207f5aaf62f..87fb5ea8ebe7 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -71,9 +71,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
71 struct device_node *np = pdev->dev.of_node; 71 struct device_node *np = pdev->dev.of_node;
72 struct sdhci_host *host = platform_get_drvdata(pdev); 72 struct sdhci_host *host = platform_get_drvdata(pdev);
73 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 73 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
74 const __be32 *clk;
75 u32 bus_width; 74 u32 bus_width;
76 int size;
77 75
78 if (of_get_property(np, "sdhci,auto-cmd12", NULL)) 76 if (of_get_property(np, "sdhci,auto-cmd12", NULL))
79 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; 77 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
@@ -101,9 +99,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
101 of_device_is_compatible(np, "fsl,mpc8536-esdhc")) 99 of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
102 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 100 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
103 101
104 clk = of_get_property(np, "clock-frequency", &size); 102 of_property_read_u32(np, "clock-frequency", &pltfm_host->clock);
105 if (clk && size == sizeof(*clk) && *clk)
106 pltfm_host->clock = be32_to_cpup(clk);
107 103
108 if (of_find_property(np, "keep-power-in-suspend", NULL)) 104 if (of_find_property(np, "keep-power-in-suspend", NULL))
109 host->mmc->pm_caps |= MMC_PM_KEEP_POWER; 105 host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
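The sdhci-pltfm hunk replaces the open-coded of_get_property()/be32_to_cpup() sequence with of_property_read_u32(), which stores through its output pointer only when the property is present and well-formed, so a caller can pre-load a default and ignore the return value. A user-space mock of that contract (the property table and names are invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct prop { const char *name; uint32_t value; int present; };

static struct prop props[] = {
        { "clock-frequency", 50000000, 1 },
        { "bus-width",       0,        0 },   /* absent property */
};

/* mock of of_property_read_u32(): writes *out only on success */
static int read_u32(const char *name, uint32_t *out)
{
        for (size_t i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                if (props[i].present && strcmp(props[i].name, name) == 0) {
                        *out = props[i].value;
                        return 0;
                }
        }
        return -1;                              /* *out left untouched */
}

int main(void)
{
        uint32_t clock = 0, width = 4;          /* 4 = pre-loaded default */

        read_u32("clock-frequency", &clock);
        read_u32("bus-width", &width);
        printf("clock=%u width=%u\n", clock, width);
        return 0;
}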
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 884294576356..34866f668dd7 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -50,7 +50,8 @@ static u32 sdhci_sirf_readl_le(struct sdhci_host *host, int reg)
50 if (unlikely((reg == SDHCI_CAPABILITIES_1) && 50 if (unlikely((reg == SDHCI_CAPABILITIES_1) &&
51 (host->mmc->caps & MMC_CAP_UHS_SDR50))) { 51 (host->mmc->caps & MMC_CAP_UHS_SDR50))) {
52 /* fake CAP_1 register */ 52 /* fake CAP_1 register */
53 val = SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING; 53 val = SDHCI_SUPPORT_DDR50 |
54 SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
54 } 55 }
55 56
56 if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) { 57 if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) {
@@ -97,7 +98,7 @@ retry:
97 clock_setting | phase, 98 clock_setting | phase,
98 SDHCI_CLK_DELAY_SETTING); 99 SDHCI_CLK_DELAY_SETTING);
99 100
100 if (!mmc_send_tuning(mmc)) { 101 if (!mmc_send_tuning(mmc, opcode, NULL)) {
101 /* Tuning is successful at this tuning point */ 102 /* Tuning is successful at this tuning point */
102 tuned_phase_cnt++; 103 tuned_phase_cnt++;
103 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", 104 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index fbc7efdddcb5..b48565ed5616 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1895,9 +1895,9 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1895 tuning_count = host->tuning_count; 1895 tuning_count = host->tuning_count;
1896 1896
1897 /* 1897 /*
1898 * The Host Controller needs tuning only in case of SDR104 mode 1898 * The Host Controller needs tuning in case of SDR104 and DDR50
1899 * and for SDR50 mode when Use Tuning for SDR50 is set in the 1899 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
1900 * Capabilities register. 1900 * the Capabilities register.
1901 * If the Host Controller supports the HS200 mode then the 1901 * If the Host Controller supports the HS200 mode then the
1902 * tuning function has to be executed. 1902 * tuning function has to be executed.
1903 */ 1903 */
@@ -1917,6 +1917,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1917 break; 1917 break;
1918 1918
1919 case MMC_TIMING_UHS_SDR104: 1919 case MMC_TIMING_UHS_SDR104:
1920 case MMC_TIMING_UHS_DDR50:
1920 break; 1921 break;
1921 1922
1922 case MMC_TIMING_UHS_SDR50: 1923 case MMC_TIMING_UHS_SDR50:
@@ -2716,17 +2717,6 @@ int sdhci_resume_host(struct sdhci_host *host)
2716 host->ops->enable_dma(host); 2717 host->ops->enable_dma(host);
2717 } 2718 }
2718 2719
2719 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2720 ret = request_threaded_irq(host->irq, sdhci_irq,
2721 sdhci_thread_irq, IRQF_SHARED,
2722 mmc_hostname(host->mmc), host);
2723 if (ret)
2724 return ret;
2725 } else {
2726 sdhci_disable_irq_wakeups(host);
2727 disable_irq_wake(host->irq);
2728 }
2729
2730 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && 2720 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2731 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 2721 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2732 /* Card keeps power but host controller does not */ 2722 /* Card keeps power but host controller does not */
@@ -2739,6 +2729,17 @@ int sdhci_resume_host(struct sdhci_host *host)
2739 mmiowb(); 2729 mmiowb();
2740 } 2730 }
2741 2731
2732 if (!device_may_wakeup(mmc_dev(host->mmc))) {
2733 ret = request_threaded_irq(host->irq, sdhci_irq,
2734 sdhci_thread_irq, IRQF_SHARED,
2735 mmc_hostname(host->mmc), host);
2736 if (ret)
2737 return ret;
2738 } else {
2739 sdhci_disable_irq_wakeups(host);
2740 disable_irq_wake(host->irq);
2741 }
2742
2742 sdhci_enable_card_detection(host); 2743 sdhci_enable_card_detection(host);
2743 2744
2744 return ret; 2745 return ret;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index b981b8552e43..83de82bceafc 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -873,6 +873,13 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
873 spin_unlock_irqrestore(&host->lock, iflags); 873 spin_unlock_irqrestore(&host->lock, iflags);
874} 874}
875 875
876static int sunxi_mmc_card_busy(struct mmc_host *mmc)
877{
878 struct sunxi_mmc_host *host = mmc_priv(mmc);
879
880 return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY);
881}
882
876static const struct of_device_id sunxi_mmc_of_match[] = { 883static const struct of_device_id sunxi_mmc_of_match[] = {
877 { .compatible = "allwinner,sun4i-a10-mmc", }, 884 { .compatible = "allwinner,sun4i-a10-mmc", },
878 { .compatible = "allwinner,sun5i-a13-mmc", }, 885 { .compatible = "allwinner,sun5i-a13-mmc", },
@@ -888,6 +895,7 @@ static struct mmc_host_ops sunxi_mmc_ops = {
888 .get_cd = mmc_gpio_get_cd, 895 .get_cd = mmc_gpio_get_cd,
889 .enable_sdio_irq = sunxi_mmc_enable_sdio_irq, 896 .enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
890 .hw_reset = sunxi_mmc_hw_reset, 897 .hw_reset = sunxi_mmc_hw_reset,
898 .card_busy = sunxi_mmc_card_busy,
891}; 899};
892 900
893static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { 901static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index fbabbb82b354..1e819f98b94f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -563,7 +563,7 @@ static void add_offloaded_reg(struct vub300_mmc_host *vub300,
563 i += 1; 563 i += 1;
564 continue; 564 continue;
565 } 565 }
566 }; 566 }
567 __add_offloaded_reg_to_fifo(vub300, register_access, func); 567 __add_offloaded_reg_to_fifo(vub300, register_access, func);
568} 568}
569 569
@@ -1372,7 +1372,7 @@ static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
1372 l += snprintf(vub300->vub_name + l, 1372 l += snprintf(vub300->vub_name + l,
1373 sizeof(vub300->vub_name) - l, "_%04X%04X", 1373 sizeof(vub300->vub_name) - l, "_%04X%04X",
1374 sf->vendor, sf->device); 1374 sf->vendor, sf->device);
1375 }; 1375 }
1376 snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin"); 1376 snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
1377 dev_info(&vub300->udev->dev, "requesting offload firmware %s\n", 1377 dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
1378 vub300->vub_name); 1378 vub300->vub_name);
@@ -1893,7 +1893,7 @@ static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
1893 i += 1; 1893 i += 1;
1894 continue; 1894 continue;
1895 } 1895 }
1896 }; 1896 }
1897 if (vub300->total_offload_count == 0) 1897 if (vub300->total_offload_count == 0)
1898 return 0; 1898 return 0;
1899 else if (vub300->fn[func].offload_count == 0) 1899 else if (vub300->fn[func].offload_count == 0)
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index ca183ea767b3..c3fd16d997ca 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -809,7 +809,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
809 cmd->error = -EINVAL; 809 cmd->error = -EINVAL;
810 810
811 goto done; 811 goto done;
812 }; 812 }
813 } 813 }
814 814
815 /* 815 /*
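The vub300 and wbsd hunks all remove the same cosmetic defect: a semicolon after the closing brace of a switch or if block. The stray ';' is merely an empty statement, so the code still compiles, but checkpatch flags it, and between an if block and its else it turns into a hard syntax error. A tiny illustration:

#include <stdio.h>

int main(void)
{
        int x = 1;

        switch (x) {
        case 1:
                puts("one");
                break;
        };              /* the ';' is a harmless but pointless empty statement */

        if (x) {
                puts("true");
        }               /* adding ';' here would break the following 'else' */
        else {
                puts("false");
        }
        return 0;
}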
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 48ce83e443c2..8d50314ac3eb 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -847,21 +847,25 @@ static int emac_probe(struct platform_device *pdev)
847 if (ndev->irq == -ENXIO) { 847 if (ndev->irq == -ENXIO) {
848 netdev_err(ndev, "No irq resource\n"); 848 netdev_err(ndev, "No irq resource\n");
849 ret = ndev->irq; 849 ret = ndev->irq;
850 goto out; 850 goto out_iounmap;
851 } 851 }
852 852
853 db->clk = devm_clk_get(&pdev->dev, NULL); 853 db->clk = devm_clk_get(&pdev->dev, NULL);
854 if (IS_ERR(db->clk)) { 854 if (IS_ERR(db->clk)) {
855 ret = PTR_ERR(db->clk); 855 ret = PTR_ERR(db->clk);
856 goto out; 856 goto out_iounmap;
857 } 857 }
858 858
859 clk_prepare_enable(db->clk); 859 ret = clk_prepare_enable(db->clk);
860 if (ret) {
861 dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
862 goto out_iounmap;
863 }
860 864
861 ret = sunxi_sram_claim(&pdev->dev); 865 ret = sunxi_sram_claim(&pdev->dev);
862 if (ret) { 866 if (ret) {
863 dev_err(&pdev->dev, "Error couldn't map SRAM to device\n"); 867 dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
864 goto out; 868 goto out_clk_disable_unprepare;
865 } 869 }
866 870
867 db->phy_node = of_parse_phandle(np, "phy", 0); 871 db->phy_node = of_parse_phandle(np, "phy", 0);
@@ -910,6 +914,10 @@ static int emac_probe(struct platform_device *pdev)
910 914
911out_release_sram: 915out_release_sram:
912 sunxi_sram_release(&pdev->dev); 916 sunxi_sram_release(&pdev->dev);
917out_clk_disable_unprepare:
918 clk_disable_unprepare(db->clk);
919out_iounmap:
920 iounmap(db->membase);
913out: 921out:
914 dev_err(db->dev, "not found (%d).\n", ret); 922 dev_err(db->dev, "not found (%d).\n", ret);
915 923
@@ -921,8 +929,12 @@ out:
921static int emac_remove(struct platform_device *pdev) 929static int emac_remove(struct platform_device *pdev)
922{ 930{
923 struct net_device *ndev = platform_get_drvdata(pdev); 931 struct net_device *ndev = platform_get_drvdata(pdev);
932 struct emac_board_info *db = netdev_priv(ndev);
924 933
925 unregister_netdev(ndev); 934 unregister_netdev(ndev);
935 sunxi_sram_release(&pdev->dev);
936 clk_disable_unprepare(db->clk);
937 iounmap(db->membase);
926 free_netdev(ndev); 938 free_netdev(ndev);
927 939
928 dev_dbg(&pdev->dev, "released and freed device\n"); 940 dev_dbg(&pdev->dev, "released and freed device\n");
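The sun4i-emac changes are a textbook probe error-path cleanup: clk_prepare_enable() finally has its return value checked, and every failure now jumps to a label that releases exactly what has been acquired so far, in reverse order (out_release_sram, out_clk_disable_unprepare, out_iounmap), with emac_remove() gaining the matching teardown. A stand-alone sketch of the goto-unwind pattern with fake acquire/release steps:

#include <stdio.h>

static int get_a(void)  { puts("get a"); return 0; }
static int get_b(void)  { puts("get b"); return -1; }  /* pretend this step fails */
static int get_c(void)  { puts("get c"); return 0; }
static void put_a(void) { puts("put a"); }
static void put_b(void) { puts("put b"); }

static int probe(void)
{
        int ret;

        ret = get_a();
        if (ret)
                goto out;
        ret = get_b();
        if (ret)
                goto out_put_a;
        ret = get_c();
        if (ret)
                goto out_put_b;
        return 0;                 /* success: keep everything */

out_put_b:
        put_b();
out_put_a:
        put_a();                  /* unwind in reverse acquisition order */
out:
        printf("probe failed (%d)\n", ret);
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}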
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index a4473d8ff4fa..f672dba345f7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
1595 packet->rdesc_count, 1); 1595 packet->rdesc_count, 1);
1596 1596
1597 /* Make sure ownership is written to the descriptor */ 1597 /* Make sure ownership is written to the descriptor */
1598 dma_wmb(); 1598 smp_wmb();
1599 1599
1600 ring->cur = cur_index + 1; 1600 ring->cur = cur_index + 1;
1601 if (!packet->skb->xmit_more || 1601 if (!packet->skb->xmit_more ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index aae9d5ecd182..dde0486667e0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1807,6 +1807,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1807 struct netdev_queue *txq; 1807 struct netdev_queue *txq;
1808 int processed = 0; 1808 int processed = 0;
1809 unsigned int tx_packets = 0, tx_bytes = 0; 1809 unsigned int tx_packets = 0, tx_bytes = 0;
1810 unsigned int cur;
1810 1811
1811 DBGPR("-->xgbe_tx_poll\n"); 1812 DBGPR("-->xgbe_tx_poll\n");
1812 1813
@@ -1814,10 +1815,15 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1814 if (!ring) 1815 if (!ring)
1815 return 0; 1816 return 0;
1816 1817
1818 cur = ring->cur;
1819
1820 /* Be sure we get ring->cur before accessing descriptor data */
1821 smp_rmb();
1822
1817 txq = netdev_get_tx_queue(netdev, channel->queue_index); 1823 txq = netdev_get_tx_queue(netdev, channel->queue_index);
1818 1824
1819 while ((processed < XGBE_TX_DESC_MAX_PROC) && 1825 while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1820 (ring->dirty != ring->cur)) { 1826 (ring->dirty != cur)) {
1821 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); 1827 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1822 rdesc = rdata->rdesc; 1828 rdesc = rdata->rdesc;
1823 1829
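The two xgbe hunks pair up a publish/consume protocol across CPUs: the transmit path writes the descriptor, issues a write barrier, then advances ring->cur, while the poll path now snapshots ring->cur, issues smp_rmb(), and only then trusts the descriptor data it is about to reap. A C11 model of that pairing, using explicit fences in place of smp_wmb()/smp_rmb() (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int descriptor;                 /* payload written by the producer */
static atomic_uint cur;                /* ring->cur stand-in */

static void *producer(void *arg)
{
        (void)arg;
        descriptor = 42;                                /* fill the descriptor */
        atomic_thread_fence(memory_order_release);      /* smp_wmb() analogue */
        atomic_store_explicit(&cur, 1, memory_order_relaxed);
        return NULL;
}

static void *consumer(void *arg)
{
        (void)arg;
        while (atomic_load_explicit(&cur, memory_order_relaxed) == 0)
                ;                                       /* wait for the index to move */
        atomic_thread_fence(memory_order_acquire);      /* smp_rmb() analogue */
        printf("descriptor = %d\n", descriptor);        /* guaranteed to see 42 */
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}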
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7f2cc3e485e..4183c2abeeeb 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -2049,7 +2049,7 @@ static void swphy_poll_timer(unsigned long data)
2049 2049
2050 for (i = 0; i < priv->num_ports; i++) { 2050 for (i = 0; i < priv->num_ports; i++) {
2051 struct bcm63xx_enetsw_port *port; 2051 struct bcm63xx_enetsw_port *port;
2052 int val, j, up, advertise, lpa, lpa2, speed, duplex, media; 2052 int val, j, up, advertise, lpa, speed, duplex, media;
2053 int external_phy = bcm_enet_port_is_rgmii(i); 2053 int external_phy = bcm_enet_port_is_rgmii(i);
2054 u8 override; 2054 u8 override;
2055 2055
@@ -2092,22 +2092,27 @@ static void swphy_poll_timer(unsigned long data)
2092 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, 2092 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2093 MII_LPA); 2093 MII_LPA);
2094 2094
2095 lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2096 MII_STAT1000);
2097
2098 /* figure out media and duplex from advertise and LPA values */ 2095 /* figure out media and duplex from advertise and LPA values */
2099 media = mii_nway_result(lpa & advertise); 2096 media = mii_nway_result(lpa & advertise);
2100 duplex = (media & ADVERTISE_FULL) ? 1 : 0; 2097 duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2101 if (lpa2 & LPA_1000FULL) 2098
2102 duplex = 1; 2099 if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2103 2100 speed = 100;
2104 if (lpa2 & (LPA_1000FULL | LPA_1000HALF)) 2101 else
2105 speed = 1000; 2102 speed = 10;
2106 else { 2103
2107 if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) 2104 if (val & BMSR_ESTATEN) {
2108 speed = 100; 2105 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2109 else 2106 port->phy_id, MII_CTRL1000);
2110 speed = 10; 2107
2108 lpa = bcmenet_sw_mdio_read(priv, external_phy,
2109 port->phy_id, MII_STAT1000);
2110
2111 if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
2112 && lpa & (LPA_1000FULL | LPA_1000HALF)) {
2113 speed = 1000;
2114 duplex = (lpa & LPA_1000FULL);
2115 }
2111 } 2116 }
2112 2117
2113 dev_info(&priv->pdev->dev, 2118 dev_info(&priv->pdev->dev,
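The bcm63xx_enet rework only reports a gigabit link when both ends agree: it first checks BMSR_ESTATEN, then requires a 1000BASE-T bit in the local advertisement (MII_CTRL1000) and in the partner ability (MII_STAT1000); otherwise the 10/100 result derived from MII_ADVERTISE and MII_LPA stands. A small model of that decision, simplified to the full-duplex bits and using placeholder flag values rather than the real MII encodings:

#include <stdio.h>

/* placeholder flag values, not the actual MII register encodings */
#define ADV_100       0x01    /* 100 Mbit/s advertised locally       */
#define LPA_100       0x02    /* 100 Mbit/s advertised by partner    */
#define ADV_1000FULL  0x04    /* 1000BASE-T full, local (CTRL1000)   */
#define LPA_1000FULL  0x08    /* 1000BASE-T full, partner (STAT1000) */
#define ESTATEN       0x10    /* extended status register present    */

static int resolve_speed(unsigned int bmsr, unsigned int adv, unsigned int lpa,
                         unsigned int ctrl1000, unsigned int stat1000)
{
        int speed = ((adv & ADV_100) && (lpa & LPA_100)) ? 100 : 10;

        if ((bmsr & ESTATEN) &&
            (ctrl1000 & ADV_1000FULL) && (stat1000 & LPA_1000FULL))
                speed = 1000;   /* gigabit only when both sides advertise it */
        return speed;
}

int main(void)
{
        printf("%d\n", resolve_speed(ESTATEN, ADV_100, LPA_100,
                                     ADV_1000FULL, LPA_1000FULL));  /* 1000 */
        printf("%d\n", resolve_speed(ESTATEN, ADV_100, LPA_100,
                                     ADV_1000FULL, 0));             /* 100  */
        return 0;
}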
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 9b35d142f47a..8fb84e69c30e 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config NET_VENDOR_CAVIUM 5config NET_VENDOR_CAVIUM
6 tristate "Cavium ethernet drivers" 6 bool "Cavium ethernet drivers"
7 depends on PCI 7 depends on PCI
8 default y 8 default y
9 ---help--- 9 ---help---
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index b3a5947a2cc0..c561fdcb79a7 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -22,7 +22,6 @@
22 22
23struct nicpf { 23struct nicpf {
24 struct pci_dev *pdev; 24 struct pci_dev *pdev;
25 u8 rev_id;
26 u8 node; 25 u8 node;
27 unsigned int flags; 26 unsigned int flags;
28 u8 num_vf_en; /* No of VF enabled */ 27 u8 num_vf_en; /* No of VF enabled */
@@ -44,6 +43,7 @@ struct nicpf {
44 u8 duplex[MAX_LMAC]; 43 u8 duplex[MAX_LMAC];
45 u32 speed[MAX_LMAC]; 44 u32 speed[MAX_LMAC];
46 u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; 45 u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
46 u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
47 u16 rss_ind_tbl_size; 47 u16 rss_ind_tbl_size;
48 bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; 48 bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
49 49
@@ -54,6 +54,11 @@ struct nicpf {
54 bool irq_allocated[NIC_PF_MSIX_VECTORS]; 54 bool irq_allocated[NIC_PF_MSIX_VECTORS];
55}; 55};
56 56
57static inline bool pass1_silicon(struct nicpf *nic)
58{
59 return nic->pdev->revision < 8;
60}
61
57/* Supported devices */ 62/* Supported devices */
58static const struct pci_device_id nic_id_table[] = { 63static const struct pci_device_id nic_id_table[] = {
59 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) }, 64 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
@@ -117,7 +122,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
117 * when PF writes to MBOX(1), in next revisions when 122 * when PF writes to MBOX(1), in next revisions when
118 * PF writes to MBOX(0) 123 * PF writes to MBOX(0)
119 */ 124 */
120 if (nic->rev_id == 0) { 125 if (pass1_silicon(nic)) {
121 /* see the comment for nic_reg_write()/nic_reg_read() 126 /* see the comment for nic_reg_write()/nic_reg_read()
122 * functions above 127 * functions above
123 */ 128 */
@@ -305,9 +310,6 @@ static void nic_init_hw(struct nicpf *nic)
305{ 310{
306 int i; 311 int i;
307 312
308 /* Reset NIC, in case the driver is repeatedly inserted and removed */
309 nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
310
311 /* Enable NIC HW block */ 313 /* Enable NIC HW block */
312 nic_reg_write(nic, NIC_PF_CFG, 0x3); 314 nic_reg_write(nic, NIC_PF_CFG, 0x3);
313 315
@@ -395,8 +397,18 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
395 padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ 397 padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
396 398
397 /* Leave RSS_SIZE as '0' to disable RSS */ 399 /* Leave RSS_SIZE as '0' to disable RSS */
398 nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), 400 if (pass1_silicon(nic)) {
399 (vnic << 24) | (padd << 16) | (rssi_base + rssi)); 401 nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
402 (vnic << 24) | (padd << 16) |
403 (rssi_base + rssi));
404 } else {
405 /* Set MPI_ALG to '0' to disable MCAM parsing */
406 nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
407 (padd << 16));
408 /* MPI index is same as CPI if MPI_ALG is not enabled */
409 nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
410 (vnic << 24) | (rssi_base + rssi));
411 }
400 412
401 if ((rssi + 1) >= cfg->rq_cnt) 413 if ((rssi + 1) >= cfg->rq_cnt)
402 continue; 414 continue;
@@ -409,6 +421,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
409 rssi = ((cpi - cpi_base) & 0x38) >> 3; 421 rssi = ((cpi - cpi_base) & 0x38) >> 3;
410 } 422 }
411 nic->cpi_base[cfg->vf_id] = cpi_base; 423 nic->cpi_base[cfg->vf_id] = cpi_base;
424 nic->rssi_base[cfg->vf_id] = rssi_base;
412} 425}
413 426
414/* Responsds to VF with its RSS indirection table size */ 427/* Responsds to VF with its RSS indirection table size */
@@ -434,10 +447,9 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
434{ 447{
435 u8 qset, idx = 0; 448 u8 qset, idx = 0;
436 u64 cpi_cfg, cpi_base, rssi_base, rssi; 449 u64 cpi_cfg, cpi_base, rssi_base, rssi;
450 u64 idx_addr;
437 451
438 cpi_base = nic->cpi_base[cfg->vf_id]; 452 rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
439 cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
440 rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
441 453
442 rssi = rssi_base; 454 rssi = rssi_base;
443 qset = cfg->vf_id; 455 qset = cfg->vf_id;
@@ -454,9 +466,15 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
454 idx++; 466 idx++;
455 } 467 }
456 468
469 cpi_base = nic->cpi_base[cfg->vf_id];
470 if (pass1_silicon(nic))
471 idx_addr = NIC_PF_CPI_0_2047_CFG;
472 else
473 idx_addr = NIC_PF_MPI_0_2047_CFG;
474 cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
457 cpi_cfg &= ~(0xFULL << 20); 475 cpi_cfg &= ~(0xFULL << 20);
458 cpi_cfg |= (cfg->hash_bits << 20); 476 cpi_cfg |= (cfg->hash_bits << 20);
459 nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg); 477 nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
460} 478}
461 479
462/* 4 level transmit side scheduler configutation 480/* 4 level transmit side scheduler configutation
@@ -1001,8 +1019,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1001 goto err_release_regions; 1019 goto err_release_regions;
1002 } 1020 }
1003 1021
1004 pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
1005
1006 nic->node = nic_get_node_id(pdev); 1022 nic->node = nic_get_node_id(pdev);
1007 1023
1008 nic_set_lmac_vf_mapping(nic); 1024 nic_set_lmac_vf_mapping(nic);
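Rather than caching PCI_REVISION_ID at probe time, the ThunderX PF driver now derives the silicon pass on demand from pdev->revision and uses it to choose between the pass-1 CPI indexing register and the newer MPI register (offsets as added to nic_reg.h below). A minimal sketch of that style of revision gate:

#include <stdbool.h>
#include <stdio.h>

#define CPI_CFG_REG 0x200000ULL     /* pass-1 indexing register offset  */
#define MPI_CFG_REG 0x210000ULL     /* later-pass indexing register offset */

struct nic { unsigned char revision; };

static bool pass1_silicon(const struct nic *nic)
{
        return nic->revision < 8;   /* same cut-off as the hunk above */
}

int main(void)
{
        struct nic pass1 = { .revision = 0 }, pass2 = { .revision = 8 };

        printf("rev 0 -> %#llx\n", pass1_silicon(&pass1) ? CPI_CFG_REG : MPI_CFG_REG);
        printf("rev 8 -> %#llx\n", pass1_silicon(&pass2) ? CPI_CFG_REG : MPI_CFG_REG);
        return 0;
}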
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 58197bb2f805..dd536be20193 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -85,7 +85,11 @@
85#define NIC_PF_ECC3_DBE_INT_W1S (0x2708) 85#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
86#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) 86#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
87#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) 87#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
88#define NIC_PF_MCAM_0_191_ENA (0x100000)
89#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
90#define NIC_PF_MCAM_CTRL (0x120000)
88#define NIC_PF_CPI_0_2047_CFG (0x200000) 91#define NIC_PF_CPI_0_2047_CFG (0x200000)
92#define NIC_PF_MPI_0_2047_CFG (0x210000)
89#define NIC_PF_RSSI_0_4097_RQ (0x220000) 93#define NIC_PF_RSSI_0_4097_RQ (0x220000)
90#define NIC_PF_LMAC_0_7_CFG (0x240000) 94#define NIC_PF_LMAC_0_7_CFG (0x240000)
91#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) 95#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index b63e579aeb12..a9377727c11c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -29,7 +29,7 @@
29static const struct pci_device_id nicvf_id_table[] = { 29static const struct pci_device_id nicvf_id_table[] = {
30 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 30 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
31 PCI_DEVICE_ID_THUNDER_NIC_VF, 31 PCI_DEVICE_ID_THUNDER_NIC_VF,
32 PCI_VENDOR_ID_CAVIUM, 0xA11E) }, 32 PCI_VENDOR_ID_CAVIUM, 0xA134) },
33 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 33 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
34 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, 34 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
35 PCI_VENDOR_ID_CAVIUM, 0xA11E) }, 35 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 574c49278900..180aa9fabf48 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -977,8 +977,10 @@ static int bgx_init_of_phy(struct bgx *bgx)
977 SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); 977 SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
978 bgx->lmac[lmac].lmacid = lmac; 978 bgx->lmac[lmac].lmacid = lmac;
979 lmac++; 979 lmac++;
980 if (lmac == MAX_LMAC_PER_BGX) 980 if (lmac == MAX_LMAC_PER_BGX) {
981 of_node_put(np_child);
981 break; 982 break;
983 }
982 } 984 }
983 return 0; 985 return 0;
984} 986}
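
Several hunks in this series (thunder_bgx, netcp_ethss, mv643xx_eth, mdio-mux) share one pattern: when leaving a for_each_..._of_node() loop early, the reference the iterator still holds on the current node must be dropped with of_node_put(), because the iterator only releases it when advancing to the next child. A small user-space model of that refcount rule follows; the node structure and get/put helpers are toy stand-ins for the OF API.

    #include <stdio.h>

    struct node { const char *name; int refcount; };

    static void node_get(struct node *n) { n->refcount++; }
    static void node_put(struct node *n) { n->refcount--; }

    int main(void)
    {
        struct node nodes[] = { {"lmac0", 0}, {"lmac1", 0}, {"lmac2", 0} };
        int i, max_lmac = 2;

        for (i = 0; i < 3; i++) {
            struct node *child = &nodes[i];

            node_get(child);            /* the iterator takes a reference */

            if (i + 1 == max_lmac) {
                /* Early exit: drop the reference the iterator still holds.
                 * Skipping this put is exactly the leak the added
                 * of_node_put() calls above close. */
                node_put(child);
                break;
            }

            node_put(child);            /* normally dropped when advancing */
        }

        for (i = 0; i < 3; i++)
            printf("%s refcount=%d\n", nodes[i].name, nodes[i].refcount);
        return 0;
    }
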
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 710715fcb23d..ce38d266f931 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -341,7 +341,7 @@ static void gfar_rx_offload_en(struct gfar_private *priv)
341 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) 341 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
342 priv->uses_rxfcb = 1; 342 priv->uses_rxfcb = 1;
343 343
344 if (priv->hwts_rx_en) 344 if (priv->hwts_rx_en || priv->rx_filer_enable)
345 priv->uses_rxfcb = 1; 345 priv->uses_rxfcb = 1;
346} 346}
347 347
@@ -351,7 +351,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
351 u32 rctrl = 0; 351 u32 rctrl = 0;
352 352
353 if (priv->rx_filer_enable) { 353 if (priv->rx_filer_enable) {
354 rctrl |= RCTRL_FILREN; 354 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
355 /* Program the RIR0 reg with the required distribution */ 355 /* Program the RIR0 reg with the required distribution */
356 if (priv->poll_mode == GFAR_SQ_POLLING) 356 if (priv->poll_mode == GFAR_SQ_POLLING)
357 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); 357 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
@@ -3462,11 +3462,9 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
3462 netif_dbg(priv, tx_err, dev, "Transmit Error\n"); 3462 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3463 } 3463 }
3464 if (events & IEVENT_BSY) { 3464 if (events & IEVENT_BSY) {
3465 dev->stats.rx_errors++; 3465 dev->stats.rx_over_errors++;
3466 atomic64_inc(&priv->extra_stats.rx_bsy); 3466 atomic64_inc(&priv->extra_stats.rx_bsy);
3467 3467
3468 gfar_receive(irq, grp_id);
3469
3470 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", 3468 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3471 gfar_read(&regs->rstat)); 3469 gfar_read(&regs->rstat));
3472 } 3470 }
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 6bdc89179b72..a33e4a829601 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -676,14 +676,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
676 u32 fcr = 0x0, fpr = FPR_FILER_MASK; 676 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
677 677
678 if (ethflow & RXH_L2DA) { 678 if (ethflow & RXH_L2DA) {
679 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | 679 fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
680 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 680 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
681 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 681 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
682 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 682 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
683 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 683 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
684 priv->cur_filer_idx = priv->cur_filer_idx - 1; 684 priv->cur_filer_idx = priv->cur_filer_idx - 1;
685 685
686 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | 686 fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
687 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 687 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
688 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 688 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
689 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 689 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e972b5ecbf0b..13a5d4cf494b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1344,6 +1344,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
1344 data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == 1344 data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
1345 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1345 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1346 } 1346 }
1347 for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
1348 data[i++] = veb->tc_stats.tc_tx_packets[j];
1349 data[i++] = veb->tc_stats.tc_tx_bytes[j];
1350 data[i++] = veb->tc_stats.tc_rx_packets[j];
1351 data[i++] = veb->tc_stats.tc_rx_bytes[j];
1352 }
1347 } 1353 }
1348 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { 1354 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
1349 p = (char *)pf + i40e_gstrings_stats[j].stat_offset; 1355 p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index dd44fafd8798..3dd26cdd0bf2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7911,6 +7911,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
7911 if (pf->hw.func_caps.vmdq) { 7911 if (pf->hw.func_caps.vmdq) {
7912 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 7912 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7913 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 7913 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7914 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
7914 } 7915 }
7915 7916
7916#ifdef I40E_FCOE 7917#ifdef I40E_FCOE
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 960169efe636..dfb6d5f79a10 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -759,11 +759,23 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
759 759
760 desc->l4i_chk = 0; 760 desc->l4i_chk = 0;
761 desc->byte_cnt = length; 761 desc->byte_cnt = length;
762 desc->buf_ptr = dma_map_single(dev->dev.parent, data, 762
763 length, DMA_TO_DEVICE); 763 if (length <= 8 && (uintptr_t)data & 0x7) {
764 if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) { 764 /* Copy unaligned small data fragment to TSO header data area */
765 WARN(1, "dma_map_single failed!\n"); 765 memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
766 return -ENOMEM; 766 data, length);
767 desc->buf_ptr = txq->tso_hdrs_dma
768 + txq->tx_curr_desc * TSO_HEADER_SIZE;
769 } else {
770 /* Alignment is okay, map buffer and hand off to hardware */
771 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
772 desc->buf_ptr = dma_map_single(dev->dev.parent, data,
773 length, DMA_TO_DEVICE);
774 if (unlikely(dma_mapping_error(dev->dev.parent,
775 desc->buf_ptr))) {
776 WARN(1, "dma_map_single failed!\n");
777 return -ENOMEM;
778 }
767 } 779 }
768 780
769 cmd_sts = BUFFER_OWNED_BY_DMA; 781 cmd_sts = BUFFER_OWNED_BY_DMA;
@@ -779,7 +791,8 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
779} 791}
780 792
781static inline void 793static inline void
782txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) 794txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
795 u32 *first_cmd_sts, bool first_desc)
783{ 796{
784 struct mv643xx_eth_private *mp = txq_to_mp(txq); 797 struct mv643xx_eth_private *mp = txq_to_mp(txq);
785 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 798 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -788,6 +801,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
788 int ret; 801 int ret;
789 u32 cmd_csum = 0; 802 u32 cmd_csum = 0;
790 u16 l4i_chk = 0; 803 u16 l4i_chk = 0;
804 u32 cmd_sts;
791 805
792 tx_index = txq->tx_curr_desc; 806 tx_index = txq->tx_curr_desc;
793 desc = &txq->tx_desc_area[tx_index]; 807 desc = &txq->tx_desc_area[tx_index];
@@ -803,9 +817,17 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
803 desc->byte_cnt = hdr_len; 817 desc->byte_cnt = hdr_len;
804 desc->buf_ptr = txq->tso_hdrs_dma + 818 desc->buf_ptr = txq->tso_hdrs_dma +
805 txq->tx_curr_desc * TSO_HEADER_SIZE; 819 txq->tx_curr_desc * TSO_HEADER_SIZE;
806 desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | 820 cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
807 GEN_CRC; 821 GEN_CRC;
808 822
823 /* Defer updating the first command descriptor until all
824 * following descriptors have been written.
825 */
826 if (first_desc)
827 *first_cmd_sts = cmd_sts;
828 else
829 desc->cmd_sts = cmd_sts;
830
809 txq->tx_curr_desc++; 831 txq->tx_curr_desc++;
810 if (txq->tx_curr_desc == txq->tx_ring_size) 832 if (txq->tx_curr_desc == txq->tx_ring_size)
811 txq->tx_curr_desc = 0; 833 txq->tx_curr_desc = 0;
@@ -819,6 +841,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
819 int desc_count = 0; 841 int desc_count = 0;
820 struct tso_t tso; 842 struct tso_t tso;
821 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 843 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
844 struct tx_desc *first_tx_desc;
845 u32 first_cmd_sts = 0;
822 846
823 /* Count needed descriptors */ 847 /* Count needed descriptors */
824 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { 848 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
@@ -826,11 +850,14 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
826 return -EBUSY; 850 return -EBUSY;
827 } 851 }
828 852
853 first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
854
829 /* Initialize the TSO handler, and prepare the first payload */ 855 /* Initialize the TSO handler, and prepare the first payload */
830 tso_start(skb, &tso); 856 tso_start(skb, &tso);
831 857
832 total_len = skb->len - hdr_len; 858 total_len = skb->len - hdr_len;
833 while (total_len > 0) { 859 while (total_len > 0) {
860 bool first_desc = (desc_count == 0);
834 char *hdr; 861 char *hdr;
835 862
836 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 863 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
@@ -840,7 +867,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
840 /* prepare packet headers: MAC + IP + TCP */ 867 /* prepare packet headers: MAC + IP + TCP */
841 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; 868 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
842 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 869 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
843 txq_put_hdr_tso(skb, txq, data_left); 870 txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
871 first_desc);
844 872
845 while (data_left > 0) { 873 while (data_left > 0) {
846 int size; 874 int size;
@@ -860,6 +888,10 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
860 __skb_queue_tail(&txq->tx_skb, skb); 888 __skb_queue_tail(&txq->tx_skb, skb);
861 skb_tx_timestamp(skb); 889 skb_tx_timestamp(skb);
862 890
891 /* ensure all other descriptors are written before first cmd_sts */
892 wmb();
893 first_tx_desc->cmd_sts = first_cmd_sts;
894
863 /* clear TX_END status */ 895 /* clear TX_END status */
864 mp->work_tx_end &= ~(1 << txq->index); 896 mp->work_tx_end &= ~(1 << txq->index);
865 897
@@ -2785,8 +2817,10 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2785 2817
2786 for_each_available_child_of_node(np, pnp) { 2818 for_each_available_child_of_node(np, pnp) {
2787 ret = mv643xx_eth_shared_of_add_port(pdev, pnp); 2819 ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
2788 if (ret) 2820 if (ret) {
2821 of_node_put(pnp);
2789 return ret; 2822 return ret;
2823 }
2790 } 2824 }
2791 return 0; 2825 return 0;
2792} 2826}
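
The mv643xx_eth TSO hunks defer writing the first descriptor's cmd_sts (the word that hands ownership to the DMA engine) until every other descriptor of the skb has been filled in, then issue wmb() so the hardware can never observe a half-built chain. A user-space model of that ordering is sketched below, with a C11 release fence standing in for the driver's wmb(); the descriptor layout is invented for illustration.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OWNED_BY_DMA (1u << 31)

    struct tx_desc { uint32_t cmd_sts; uint32_t byte_cnt; };

    int main(void)
    {
        struct tx_desc ring[4] = { { 0, 0 } };
        uint32_t first_cmd_sts = 0;
        int i;

        /* Fill all descriptors; remember the first one's flags instead of
         * writing them, so the engine cannot start on a partial chain. */
        for (i = 0; i < 4; i++) {
            ring[i].byte_cnt = 1500;
            if (i == 0)
                first_cmd_sts = OWNED_BY_DMA;   /* deferred */
            else
                ring[i].cmd_sts = OWNED_BY_DMA;
        }

        /* Equivalent of the driver's wmb(): make all prior descriptor
         * writes visible before the ownership hand-off below. */
        atomic_thread_fence(memory_order_release);
        ring[0].cmd_sts = first_cmd_sts;

        for (i = 0; i < 4; i++)
            printf("desc %d cmd_sts=0x%08x\n", i, (unsigned)ring[i].cmd_sts);
        return 0;
    }
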
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0a3202047569..2177e56ed0be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2398,7 +2398,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2398 } 2398 }
2399 } 2399 }
2400 2400
2401 memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); 2401 memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2402 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; 2402 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2403 INIT_WORK(&priv->mfunc.master.comm_work, 2403 INIT_WORK(&priv->mfunc.master.comm_work,
2404 mlx4_master_comm_channel); 2404 mlx4_master_comm_channel);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 494e7762fdb1..4421bf5463f6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -964,6 +964,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
964 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; 964 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
965 else if (vlan_proto == ETH_P_8021Q) 965 else if (vlan_proto == ETH_P_8021Q)
966 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; 966 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
967 else
968 tx_desc->ctrl.ins_vlan = 0;
967 969
968 tx_desc->ctrl.fence_size = real_size; 970 tx_desc->ctrl.fence_size = real_size;
969 971
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c34488479365..603d1c3d3b2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -196,7 +196,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
196 return; 196 return;
197 } 197 }
198 198
199 memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); 199 memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
200 s_eqe->slave_id = slave; 200 s_eqe->slave_id = slave;
201 /* ensure all information is written before setting the ownersip bit */ 201 /* ensure all information is written before setting the ownersip bit */
202 dma_wmb(); 202 dma_wmb();
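
The two mlx4 hunks bound the EQE memset/memcpy by sizeof(struct mlx4_eqe) instead of dev->caps.eqe_size: the hardware entry stride can be larger than the software structure, so using the stride would touch memory past the struct. A tiny user-space illustration of bounding the copy by the struct size rather than the hardware stride (the sizes below are examples only):

    #include <stdio.h>
    #include <string.h>

    struct sw_eqe { unsigned char data[32]; };   /* software view (example size) */

    int main(void)
    {
        unsigned char hw_slot[64];               /* hardware stride, e.g. 64-byte EQEs */
        struct sw_eqe eqe;

        memset(hw_slot, 0xab, sizeof(hw_slot));

        /* Copy only what the software structure can hold, not the full
         * hardware stride -- the same bound the patch switches to. */
        memcpy(&eqe, hw_slot, sizeof(eqe));

        printf("copied %zu of %zu bytes\n", sizeof(eqe), sizeof(hw_slot));
        return 0;
    }
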
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index a41bb5e6b954..75e88f4c1531 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4076,6 +4076,8 @@ static void nv_do_nic_poll(unsigned long data)
4076 struct fe_priv *np = netdev_priv(dev); 4076 struct fe_priv *np = netdev_priv(dev);
4077 u8 __iomem *base = get_hwbase(dev); 4077 u8 __iomem *base = get_hwbase(dev);
4078 u32 mask = 0; 4078 u32 mask = 0;
4079 unsigned long flags;
4080 unsigned int irq = 0;
4079 4081
4080 /* 4082 /*
4081 * First disable irq(s) and then 4083 * First disable irq(s) and then
@@ -4085,25 +4087,27 @@ static void nv_do_nic_poll(unsigned long data)
4085 4087
4086 if (!using_multi_irqs(dev)) { 4088 if (!using_multi_irqs(dev)) {
4087 if (np->msi_flags & NV_MSI_X_ENABLED) 4089 if (np->msi_flags & NV_MSI_X_ENABLED)
4088 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4090 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
4089 else 4091 else
4090 disable_irq_lockdep(np->pci_dev->irq); 4092 irq = np->pci_dev->irq;
4091 mask = np->irqmask; 4093 mask = np->irqmask;
4092 } else { 4094 } else {
4093 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4095 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4094 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4096 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
4095 mask |= NVREG_IRQ_RX_ALL; 4097 mask |= NVREG_IRQ_RX_ALL;
4096 } 4098 }
4097 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4099 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4098 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4100 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
4099 mask |= NVREG_IRQ_TX_ALL; 4101 mask |= NVREG_IRQ_TX_ALL;
4100 } 4102 }
4101 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4103 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4102 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4104 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
4103 mask |= NVREG_IRQ_OTHER; 4105 mask |= NVREG_IRQ_OTHER;
4104 } 4106 }
4105 } 4107 }
4106 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 4108
4109 disable_irq_nosync_lockdep_irqsave(irq, &flags);
4110 synchronize_irq(irq);
4107 4111
4108 if (np->recover_error) { 4112 if (np->recover_error) {
4109 np->recover_error = 0; 4113 np->recover_error = 0;
@@ -4156,28 +4160,22 @@ static void nv_do_nic_poll(unsigned long data)
4156 nv_nic_irq_optimized(0, dev); 4160 nv_nic_irq_optimized(0, dev);
4157 else 4161 else
4158 nv_nic_irq(0, dev); 4162 nv_nic_irq(0, dev);
4159 if (np->msi_flags & NV_MSI_X_ENABLED)
4160 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4161 else
4162 enable_irq_lockdep(np->pci_dev->irq);
4163 } else { 4163 } else {
4164 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4164 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4165 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4165 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4166 nv_nic_irq_rx(0, dev); 4166 nv_nic_irq_rx(0, dev);
4167 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4168 } 4167 }
4169 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4168 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4170 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4169 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4171 nv_nic_irq_tx(0, dev); 4170 nv_nic_irq_tx(0, dev);
4172 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4173 } 4171 }
4174 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4172 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4175 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4173 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4176 nv_nic_irq_other(0, dev); 4174 nv_nic_irq_other(0, dev);
4177 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4178 } 4175 }
4179 } 4176 }
4180 4177
4178 enable_irq_lockdep_irqrestore(irq, &flags);
4181} 4179}
4182 4180
4183#ifdef CONFIG_NET_POLL_CONTROLLER 4181#ifdef CONFIG_NET_POLL_CONTROLLER
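
In forcedeth, nv_do_nic_poll() now records the single IRQ it needs, disables it with disable_irq_nosync_lockdep_irqsave() plus an explicit synchronize_irq(), and re-enables it once at the end, rather than toggling every vector with disable/enable_irq_lockdep. The schematic sketch below shows that shape; the stub functions only stand in for the kernel IRQ helpers and are assumptions, not real calls.

    #include <stdio.h>

    /* Stand-ins for the kernel IRQ helpers used by the patch (assumptions). */
    static void disable_irq_nosync_save(unsigned int irq, unsigned long *flags)
    { *flags = 0; printf("disable irq %u (nosync)\n", irq); }
    static void synchronize(unsigned int irq)
    { printf("wait for in-flight handlers of irq %u\n", irq); }
    static void enable_irq_restore(unsigned int irq, unsigned long flags)
    { (void)flags; printf("enable irq %u\n", irq); }

    static void poll(unsigned int irq)
    {
        unsigned long flags;

        /* One disable/enable pair for the vector we actually poll,
         * instead of toggling every vector unconditionally. */
        disable_irq_nosync_save(irq, &flags);
        synchronize(irq);       /* no handler can be running past this point */

        printf("run deferred interrupt work for irq %u\n", irq);

        enable_irq_restore(irq, flags);
    }

    int main(void)
    {
        poll(42);
        return 0;
    }
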
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 257ea713b4c1..a484d8beb855 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1127,7 +1127,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1127 struct sh_eth_txdesc *txdesc = NULL; 1127 struct sh_eth_txdesc *txdesc = NULL;
1128 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; 1128 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1129 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1129 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1130 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1130 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1131 dma_addr_t dma_addr; 1131 dma_addr_t dma_addr;
1132 1132
1133 mdp->cur_rx = 0; 1133 mdp->cur_rx = 0;
@@ -1148,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
1148 1148
1149 /* RX descriptor */ 1149 /* RX descriptor */
1150 rxdesc = &mdp->rx_ring[i]; 1150 rxdesc = &mdp->rx_ring[i];
1151 /* The size of the buffer is a multiple of 16 bytes. */ 1151 /* The size of the buffer is a multiple of 32 bytes. */
1152 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1152 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
1153 dma_addr = dma_map_single(&ndev->dev, skb->data, 1153 dma_addr = dma_map_single(&ndev->dev, skb->data,
1154 rxdesc->buffer_length, 1154 rxdesc->buffer_length,
1155 DMA_FROM_DEVICE); 1155 DMA_FROM_DEVICE);
@@ -1450,7 +1450,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1450 struct sk_buff *skb; 1450 struct sk_buff *skb;
1451 u16 pkt_len = 0; 1451 u16 pkt_len = 0;
1452 u32 desc_status; 1452 u32 desc_status;
1453 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1453 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1454 dma_addr_t dma_addr; 1454 dma_addr_t dma_addr;
1455 1455
1456 boguscnt = min(boguscnt, *quota); 1456 boguscnt = min(boguscnt, *quota);
@@ -1506,7 +1506,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1506 if (mdp->cd->rpadir) 1506 if (mdp->cd->rpadir)
1507 skb_reserve(skb, NET_IP_ALIGN); 1507 skb_reserve(skb, NET_IP_ALIGN);
1508 dma_unmap_single(&ndev->dev, rxdesc->addr, 1508 dma_unmap_single(&ndev->dev, rxdesc->addr,
1509 ALIGN(mdp->rx_buf_sz, 16), 1509 ALIGN(mdp->rx_buf_sz, 32),
1510 DMA_FROM_DEVICE); 1510 DMA_FROM_DEVICE);
1511 skb_put(skb, pkt_len); 1511 skb_put(skb, pkt_len);
1512 skb->protocol = eth_type_trans(skb, ndev); 1512 skb->protocol = eth_type_trans(skb, ndev);
@@ -1524,8 +1524,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1524 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1524 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1525 entry = mdp->dirty_rx % mdp->num_rx_ring; 1525 entry = mdp->dirty_rx % mdp->num_rx_ring;
1526 rxdesc = &mdp->rx_ring[entry]; 1526 rxdesc = &mdp->rx_ring[entry];
1527 /* The buffer size is aligned to a 16 byte boundary. */ 1527 /* The buffer size is aligned to a 32 byte boundary. */
1528 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1528 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
1529 1529
1530 if (mdp->rx_skbuff[entry] == NULL) { 1530 if (mdp->rx_skbuff[entry] == NULL) {
1531 skb = netdev_alloc_skb(ndev, skbuff_size); 1531 skb = netdev_alloc_skb(ndev, skbuff_size);
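
The sh_eth hunks grow the RX buffer bookkeeping from 16-byte to 32-byte alignment: the descriptor's buffer_length becomes ALIGN(rx_buf_sz, 32) and the skb allocation gains 32 extra bytes of headroom so the aligned DMA length still fits. A short arithmetic check of those two expressions, using example sizes (the SH_ETH_RX_ALIGN value here is assumed for illustration):

    #include <stdio.h>

    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
    #define SH_ETH_RX_ALIGN 32      /* example value for illustration */

    int main(void)
    {
        int rx_buf_sz = 1538;       /* example MTU-derived buffer size */

        int buffer_length = ALIGN(rx_buf_sz, 32);
        int skbuff_size   = rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;

        printf("buffer_length=%d skbuff_size=%d\n", buffer_length, skbuff_size);
        /* buffer_length (1568) <= skbuff_size (1601): the padded allocation
         * covers the 32-byte-aligned DMA length plus alignment slack. */
        return 0;
    }
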
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 98d172b04f71..a9b9460de0d6 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -9,7 +9,7 @@
9 9
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
12#include <asm/cmpxchg.h> 12#include <linux/atomic.h>
13#include "net_driver.h" 13#include "net_driver.h"
14#include "nic.h" 14#include "nic.h"
15#include "io.h" 15#include "io.h"
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index ad62615a93dc..c771e0af4e06 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -401,8 +401,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
401/* For Siena platforms NIC time is s and ns */ 401/* For Siena platforms NIC time is s and ns */
402static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) 402static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
403{ 403{
404 struct timespec ts = ns_to_timespec(ns); 404 struct timespec64 ts = ns_to_timespec64(ns);
405 *nic_major = ts.tv_sec; 405 *nic_major = (u32)ts.tv_sec;
406 *nic_minor = ts.tv_nsec; 406 *nic_minor = ts.tv_nsec;
407} 407}
408 408
@@ -431,8 +431,8 @@ static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
431 */ 431 */
432static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) 432static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
433{ 433{
434 struct timespec ts = ns_to_timespec(ns); 434 struct timespec64 ts = ns_to_timespec64(ns);
435 u32 maj = ts.tv_sec; 435 u32 maj = (u32)ts.tv_sec;
436 u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + 436 u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
437 (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); 437 (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
438 438
@@ -646,28 +646,28 @@ static void efx_ptp_send_times(struct efx_nic *efx,
646 struct pps_event_time *last_time) 646 struct pps_event_time *last_time)
647{ 647{
648 struct pps_event_time now; 648 struct pps_event_time now;
649 struct timespec limit; 649 struct timespec64 limit;
650 struct efx_ptp_data *ptp = efx->ptp_data; 650 struct efx_ptp_data *ptp = efx->ptp_data;
651 struct timespec start; 651 struct timespec64 start;
652 int *mc_running = ptp->start.addr; 652 int *mc_running = ptp->start.addr;
653 653
654 pps_get_ts(&now); 654 pps_get_ts(&now);
655 start = now.ts_real; 655 start = now.ts_real;
656 limit = now.ts_real; 656 limit = now.ts_real;
657 timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); 657 timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
658 658
659 /* Write host time for specified period or until MC is done */ 659 /* Write host time for specified period or until MC is done */
660 while ((timespec_compare(&now.ts_real, &limit) < 0) && 660 while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
661 ACCESS_ONCE(*mc_running)) { 661 ACCESS_ONCE(*mc_running)) {
662 struct timespec update_time; 662 struct timespec64 update_time;
663 unsigned int host_time; 663 unsigned int host_time;
664 664
665 /* Don't update continuously to avoid saturating the PCIe bus */ 665 /* Don't update continuously to avoid saturating the PCIe bus */
666 update_time = now.ts_real; 666 update_time = now.ts_real;
667 timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); 667 timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
668 do { 668 do {
669 pps_get_ts(&now); 669 pps_get_ts(&now);
670 } while ((timespec_compare(&now.ts_real, &update_time) < 0) && 670 } while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
671 ACCESS_ONCE(*mc_running)); 671 ACCESS_ONCE(*mc_running));
672 672
673 /* Synchronise NIC with single word of time only */ 673 /* Synchronise NIC with single word of time only */
@@ -723,7 +723,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
723 struct efx_ptp_data *ptp = efx->ptp_data; 723 struct efx_ptp_data *ptp = efx->ptp_data;
724 u32 last_sec; 724 u32 last_sec;
725 u32 start_sec; 725 u32 start_sec;
726 struct timespec delta; 726 struct timespec64 delta;
727 ktime_t mc_time; 727 ktime_t mc_time;
728 728
729 if (number_readings == 0) 729 if (number_readings == 0)
@@ -737,14 +737,14 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
737 */ 737 */
738 for (i = 0; i < number_readings; i++) { 738 for (i = 0; i < number_readings; i++) {
739 s32 window, corrected; 739 s32 window, corrected;
740 struct timespec wait; 740 struct timespec64 wait;
741 741
742 efx_ptp_read_timeset( 742 efx_ptp_read_timeset(
743 MCDI_ARRAY_STRUCT_PTR(synch_buf, 743 MCDI_ARRAY_STRUCT_PTR(synch_buf,
744 PTP_OUT_SYNCHRONIZE_TIMESET, i), 744 PTP_OUT_SYNCHRONIZE_TIMESET, i),
745 &ptp->timeset[i]); 745 &ptp->timeset[i]);
746 746
747 wait = ktime_to_timespec( 747 wait = ktime_to_timespec64(
748 ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0)); 748 ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
749 window = ptp->timeset[i].window; 749 window = ptp->timeset[i].window;
750 corrected = window - wait.tv_nsec; 750 corrected = window - wait.tv_nsec;
@@ -803,7 +803,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
803 ptp->timeset[last_good].minor, 0); 803 ptp->timeset[last_good].minor, 0);
804 804
805 /* Calculate delay from NIC top of second to last_time */ 805 /* Calculate delay from NIC top of second to last_time */
806 delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec; 806 delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec;
807 807
808 /* Set PPS timestamp to match NIC top of second */ 808 /* Set PPS timestamp to match NIC top of second */
809 ptp->host_time_pps = *last_time; 809 ptp->host_time_pps = *last_time;
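
The sfc/ptp.c hunk converts the host/NIC time-sync paths from struct timespec to struct timespec64 (timespec64_add_ns, timespec64_compare, ktime_to_timespec64), so seconds are carried in a 64-bit field and the code stays correct past 2038 on 32-bit builds; the added (u32) casts make the truncation to the NIC's 32-bit seconds field explicit. A tiny standalone illustration of the seconds-width difference, using a look-alike struct rather than the kernel type:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct timespec64_like { int64_t tv_sec; long tv_nsec; };  /* 64-bit seconds */

    static struct timespec64_like ns_to_ts64(int64_t ns)
    {
        struct timespec64_like ts;

        ts.tv_sec  = ns / 1000000000LL;
        ts.tv_nsec = (long)(ns % 1000000000LL);
        return ts;
    }

    int main(void)
    {
        /* A time well past 2038-01-19: it overflows a signed 32-bit tv_sec
         * but fits the 64-bit field used by timespec64. */
        int64_t ns = 2200000000LL * 1000000000LL;
        struct timespec64_like ts = ns_to_ts64(ns);

        printf("tv_sec=%" PRId64 " (as u32: %u)\n", ts.tv_sec, (uint32_t)ts.tv_sec);
        return 0;
    }
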
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 8fc90f1c872c..874fb297e96c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -30,6 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/pm_runtime.h> 31#include <linux/pm_runtime.h>
32#include <linux/of.h> 32#include <linux/of.h>
33#include <linux/of_mdio.h>
33#include <linux/of_net.h> 34#include <linux/of_net.h>
34#include <linux/of_device.h> 35#include <linux/of_device.h>
35#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
@@ -365,6 +366,7 @@ struct cpsw_priv {
365 spinlock_t lock; 366 spinlock_t lock;
366 struct platform_device *pdev; 367 struct platform_device *pdev;
367 struct net_device *ndev; 368 struct net_device *ndev;
369 struct device_node *phy_node;
368 struct napi_struct napi_rx; 370 struct napi_struct napi_rx;
369 struct napi_struct napi_tx; 371 struct napi_struct napi_tx;
370 struct device *dev; 372 struct device *dev;
@@ -1145,7 +1147,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1145 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1147 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1146 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1148 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1147 1149
1148 slave->phy = phy_connect(priv->ndev, slave->data->phy_id, 1150 if (priv->phy_node)
1151 slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
1152 &cpsw_adjust_link, 0, slave->data->phy_if);
1153 else
1154 slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
1149 &cpsw_adjust_link, slave->data->phy_if); 1155 &cpsw_adjust_link, slave->data->phy_if);
1150 if (IS_ERR(slave->phy)) { 1156 if (IS_ERR(slave->phy)) {
1151 dev_err(priv->dev, "phy %s not found on slave %d\n", 1157 dev_err(priv->dev, "phy %s not found on slave %d\n",
@@ -1934,11 +1940,12 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1934 slave->port_vlan = data->dual_emac_res_vlan; 1940 slave->port_vlan = data->dual_emac_res_vlan;
1935} 1941}
1936 1942
1937static int cpsw_probe_dt(struct cpsw_platform_data *data, 1943static int cpsw_probe_dt(struct cpsw_priv *priv,
1938 struct platform_device *pdev) 1944 struct platform_device *pdev)
1939{ 1945{
1940 struct device_node *node = pdev->dev.of_node; 1946 struct device_node *node = pdev->dev.of_node;
1941 struct device_node *slave_node; 1947 struct device_node *slave_node;
1948 struct cpsw_platform_data *data = &priv->data;
1942 int i = 0, ret; 1949 int i = 0, ret;
1943 u32 prop; 1950 u32 prop;
1944 1951
@@ -2029,6 +2036,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2029 if (strcmp(slave_node->name, "slave")) 2036 if (strcmp(slave_node->name, "slave"))
2030 continue; 2037 continue;
2031 2038
2039 priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
2032 parp = of_get_property(slave_node, "phy_id", &lenp); 2040 parp = of_get_property(slave_node, "phy_id", &lenp);
2033 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 2041 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
2034 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); 2042 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
@@ -2044,7 +2052,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2044 } 2052 }
2045 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2053 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2046 PHY_ID_FMT, mdio->name, phyid); 2054 PHY_ID_FMT, mdio->name, phyid);
2047
2048 slave_data->phy_if = of_get_phy_mode(slave_node); 2055 slave_data->phy_if = of_get_phy_mode(slave_node);
2049 if (slave_data->phy_if < 0) { 2056 if (slave_data->phy_if < 0) {
2050 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", 2057 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2240,7 +2247,7 @@ static int cpsw_probe(struct platform_device *pdev)
2240 /* Select default pin state */ 2247 /* Select default pin state */
2241 pinctrl_pm_select_default_state(&pdev->dev); 2248 pinctrl_pm_select_default_state(&pdev->dev);
2242 2249
2243 if (cpsw_probe_dt(&priv->data, pdev)) { 2250 if (cpsw_probe_dt(priv, pdev)) {
2244 dev_err(&pdev->dev, "cpsw: platform data missing\n"); 2251 dev_err(&pdev->dev, "cpsw: platform data missing\n");
2245 ret = -ENODEV; 2252 ret = -ENODEV;
2246 goto clean_runtime_disable_ret; 2253 goto clean_runtime_disable_ret;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 6bff8d82ceab..4e70e7586a09 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2637,8 +2637,10 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
2637 mac_phy_link = true; 2637 mac_phy_link = true;
2638 2638
2639 slave->open = true; 2639 slave->open = true;
2640 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2640 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
2641 of_node_put(port);
2641 break; 2642 break;
2643 }
2642 } 2644 }
2643 2645
2644 /* of_phy_connect() is needed only for MAC-PHY interface */ 2646 /* of_phy_connect() is needed only for MAC-PHY interface */
@@ -3137,8 +3139,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3137 continue; 3139 continue;
3138 } 3140 }
3139 gbe_dev->num_slaves++; 3141 gbe_dev->num_slaves++;
3140 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 3142 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3143 of_node_put(interface);
3141 break; 3144 break;
3145 }
3142 } 3146 }
3143 of_node_put(interfaces); 3147 of_node_put(interfaces);
3144 3148
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cde29f8a37bf..445071c163cb 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -594,14 +594,12 @@ static struct rtable *geneve_get_rt(struct sk_buff *skb,
594 rt = ip_route_output_key(geneve->net, fl4); 594 rt = ip_route_output_key(geneve->net, fl4);
595 if (IS_ERR(rt)) { 595 if (IS_ERR(rt)) {
596 netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); 596 netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
597 dev->stats.tx_carrier_errors++; 597 return ERR_PTR(-ENETUNREACH);
598 return rt;
599 } 598 }
600 if (rt->dst.dev == dev) { /* is this necessary? */ 599 if (rt->dst.dev == dev) { /* is this necessary? */
601 netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); 600 netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
602 dev->stats.collisions++;
603 ip_rt_put(rt); 601 ip_rt_put(rt);
604 return ERR_PTR(-EINVAL); 602 return ERR_PTR(-ELOOP);
605 } 603 }
606 return rt; 604 return rt;
607} 605}
@@ -627,12 +625,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
627 struct ip_tunnel_info *info = NULL; 625 struct ip_tunnel_info *info = NULL;
628 struct rtable *rt = NULL; 626 struct rtable *rt = NULL;
629 const struct iphdr *iip; /* interior IP header */ 627 const struct iphdr *iip; /* interior IP header */
628 int err = -EINVAL;
630 struct flowi4 fl4; 629 struct flowi4 fl4;
631 __u8 tos, ttl; 630 __u8 tos, ttl;
632 __be16 sport; 631 __be16 sport;
633 bool udp_csum; 632 bool udp_csum;
634 __be16 df; 633 __be16 df;
635 int err;
636 634
637 if (geneve->collect_md) { 635 if (geneve->collect_md) {
638 info = skb_tunnel_info(skb); 636 info = skb_tunnel_info(skb);
@@ -647,7 +645,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
647 rt = geneve_get_rt(skb, dev, &fl4, info); 645 rt = geneve_get_rt(skb, dev, &fl4, info);
648 if (IS_ERR(rt)) { 646 if (IS_ERR(rt)) {
649 netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr); 647 netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
650 dev->stats.tx_carrier_errors++; 648 err = PTR_ERR(rt);
651 goto tx_error; 649 goto tx_error;
652 } 650 }
653 651
@@ -699,10 +697,37 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
699tx_error: 697tx_error:
700 dev_kfree_skb(skb); 698 dev_kfree_skb(skb);
701err: 699err:
702 dev->stats.tx_errors++; 700 if (err == -ELOOP)
701 dev->stats.collisions++;
702 else if (err == -ENETUNREACH)
703 dev->stats.tx_carrier_errors++;
704 else
705 dev->stats.tx_errors++;
703 return NETDEV_TX_OK; 706 return NETDEV_TX_OK;
704} 707}
705 708
709static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
710{
711 struct ip_tunnel_info *info = skb_tunnel_info(skb);
712 struct geneve_dev *geneve = netdev_priv(dev);
713 struct rtable *rt;
714 struct flowi4 fl4;
715
716 if (ip_tunnel_info_af(info) != AF_INET)
717 return -EINVAL;
718
719 rt = geneve_get_rt(skb, dev, &fl4, info);
720 if (IS_ERR(rt))
721 return PTR_ERR(rt);
722
723 ip_rt_put(rt);
724 info->key.u.ipv4.src = fl4.saddr;
725 info->key.tp_src = udp_flow_src_port(geneve->net, skb,
726 1, USHRT_MAX, true);
727 info->key.tp_dst = geneve->dst_port;
728 return 0;
729}
730
706static const struct net_device_ops geneve_netdev_ops = { 731static const struct net_device_ops geneve_netdev_ops = {
707 .ndo_init = geneve_init, 732 .ndo_init = geneve_init,
708 .ndo_uninit = geneve_uninit, 733 .ndo_uninit = geneve_uninit,
@@ -713,6 +738,7 @@ static const struct net_device_ops geneve_netdev_ops = {
713 .ndo_change_mtu = eth_change_mtu, 738 .ndo_change_mtu = eth_change_mtu,
714 .ndo_validate_addr = eth_validate_addr, 739 .ndo_validate_addr = eth_validate_addr,
715 .ndo_set_mac_address = eth_mac_addr, 740 .ndo_set_mac_address = eth_mac_addr,
741 .ndo_fill_metadata_dst = geneve_fill_metadata_dst,
716}; 742};
717 743
718static void geneve_get_drvinfo(struct net_device *dev, 744static void geneve_get_drvinfo(struct net_device *dev,
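
The geneve hunks make geneve_get_rt() return distinct error pointers (-ENETUNREACH when there is no route, -ELOOP for a circular route) and move all statistics updates to the single error path, which classifies the failure by errno. A small sketch of that classification, with a toy stats struct standing in for the net_device counters:

    #include <errno.h>
    #include <stdio.h>

    struct tx_stats {
        unsigned long collisions;
        unsigned long tx_carrier_errors;
        unsigned long tx_errors;
    };

    /* Mirror of the error path added above: one counter per failure class. */
    static void account_tx_error(struct tx_stats *s, int err)
    {
        if (err == -ELOOP)
            s->collisions++;            /* circular route */
        else if (err == -ENETUNREACH)
            s->tx_carrier_errors++;     /* no route to destination */
        else
            s->tx_errors++;             /* anything else */
    }

    int main(void)
    {
        struct tx_stats s = { 0, 0, 0 };

        account_tx_error(&s, -ELOOP);
        account_tx_error(&s, -ENETUNREACH);
        account_tx_error(&s, -EINVAL);

        printf("collisions=%lu carrier=%lu other=%lu\n",
               s.collisions, s.tx_carrier_errors, s.tx_errors);
        return 0;
    }
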
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 248478c6f6e4..197c93937c2d 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -137,7 +137,7 @@ static const struct proto_ops macvtap_socket_ops;
137#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 137#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
138 NETIF_F_TSO6 | NETIF_F_UFO) 138 NETIF_F_TSO6 | NETIF_F_UFO)
139#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 139#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
140#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 140#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
141 141
142static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev) 142static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
143{ 143{
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 11e3975485c1..436972b2a746 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -122,6 +122,11 @@ config MICREL_PHY
122 ---help--- 122 ---help---
123 Supports the KSZ9021, VSC8201, KS8001 PHYs. 123 Supports the KSZ9021, VSC8201, KS8001 PHYs.
124 124
125config DP83848_PHY
126 tristate "Driver for Texas Instruments DP83848 PHY"
127 ---help---
128 Supports the DP83848 PHY.
129
125config DP83867_PHY 130config DP83867_PHY
126 tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" 131 tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
127 ---help--- 132 ---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 87f079c4b2c7..b74822463930 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
24obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 24obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
25obj-$(CONFIG_NATIONAL_PHY) += national.o 25obj-$(CONFIG_NATIONAL_PHY) += national.o
26obj-$(CONFIG_DP83640_PHY) += dp83640.o 26obj-$(CONFIG_DP83640_PHY) += dp83640.o
27obj-$(CONFIG_DP83848_PHY) += dp83848.o
27obj-$(CONFIG_DP83867_PHY) += dp83867.o 28obj-$(CONFIG_DP83867_PHY) += dp83867.o
28obj-$(CONFIG_STE10XP) += ste10Xp.o 29obj-$(CONFIG_STE10XP) += ste10Xp.o
29obj-$(CONFIG_MICREL_PHY) += micrel.o 30obj-$(CONFIG_MICREL_PHY) += micrel.o
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
new file mode 100644
index 000000000000..5ce9bef54468
--- /dev/null
+++ b/drivers/net/phy/dp83848.c
@@ -0,0 +1,99 @@
1/*
2 * Driver for the Texas Instruments DP83848 PHY
3 *
4 * Copyright (C) 2015 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/phy.h>
18
19#define DP83848_PHY_ID 0x20005c90
20
21/* Registers */
22#define DP83848_MICR 0x11
23#define DP83848_MISR 0x12
24
25/* MICR Register Fields */
26#define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */
27#define DP83848_MICR_INTEN BIT(1) /* Interrupt Enable */
28
29/* MISR Register Fields */
30#define DP83848_MISR_RHF_INT_EN BIT(0) /* Receive Error Counter */
31#define DP83848_MISR_FHF_INT_EN BIT(1) /* False Carrier Counter */
32#define DP83848_MISR_ANC_INT_EN BIT(2) /* Auto-negotiation complete */
33#define DP83848_MISR_DUP_INT_EN BIT(3) /* Duplex Status */
34#define DP83848_MISR_SPD_INT_EN BIT(4) /* Speed status */
35#define DP83848_MISR_LINK_INT_EN BIT(5) /* Link status */
36#define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */
37#define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */
38
39static int dp83848_ack_interrupt(struct phy_device *phydev)
40{
41 int err = phy_read(phydev, DP83848_MISR);
42
43 return err < 0 ? err : 0;
44}
45
46static int dp83848_config_intr(struct phy_device *phydev)
47{
48 int err;
49
50 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
51 err = phy_write(phydev, DP83848_MICR,
52 DP83848_MICR_INT_OE |
53 DP83848_MICR_INTEN);
54 if (err < 0)
55 return err;
56
57 return phy_write(phydev, DP83848_MISR,
58 DP83848_MISR_ANC_INT_EN |
59 DP83848_MISR_DUP_INT_EN |
60 DP83848_MISR_SPD_INT_EN |
61 DP83848_MISR_LINK_INT_EN);
62 }
63
64 return phy_write(phydev, DP83848_MICR, 0x0);
65}
66
67static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
68 { DP83848_PHY_ID, 0xfffffff0 },
69 { }
70};
71MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
72
73static struct phy_driver dp83848_driver[] = {
74 {
75 .phy_id = DP83848_PHY_ID,
76 .phy_id_mask = 0xfffffff0,
77 .name = "TI DP83848",
78 .features = PHY_BASIC_FEATURES,
79 .flags = PHY_HAS_INTERRUPT,
80
81 .soft_reset = genphy_soft_reset,
82 .config_init = genphy_config_init,
83 .suspend = genphy_suspend,
84 .resume = genphy_resume,
85 .config_aneg = genphy_config_aneg,
86 .read_status = genphy_read_status,
87
88 /* IRQ related */
89 .ack_interrupt = dp83848_ack_interrupt,
90 .config_intr = dp83848_config_intr,
91
92 .driver = { .owner = THIS_MODULE, },
93 },
94};
95module_phy_driver(dp83848_driver);
96
97MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
98MODULE_AUTHOR("Andrew F. Davis <afd@ti.com");
99MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 2377c1341172..7fde454fbc4f 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -113,12 +113,14 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
113 if (!iprop || len != sizeof(uint32_t)) { 113 if (!iprop || len != sizeof(uint32_t)) {
114 dev_err(&pdev->dev, "mdio-mux child node %s is " 114 dev_err(&pdev->dev, "mdio-mux child node %s is "
115 "missing a 'reg' property\n", np2->full_name); 115 "missing a 'reg' property\n", np2->full_name);
116 of_node_put(np2);
116 return -ENODEV; 117 return -ENODEV;
117 } 118 }
118 if (be32_to_cpup(iprop) & ~s->mask) { 119 if (be32_to_cpup(iprop) & ~s->mask) {
119 dev_err(&pdev->dev, "mdio-mux child node %s has " 120 dev_err(&pdev->dev, "mdio-mux child node %s has "
120 "a 'reg' value with unmasked bits\n", 121 "a 'reg' value with unmasked bits\n",
121 np2->full_name); 122 np2->full_name);
123 of_node_put(np2);
122 return -ENODEV; 124 return -ENODEV;
123 } 125 }
124 } 126 }
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 280c7c311f72..908e8d486342 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -144,6 +144,7 @@ int mdio_mux_init(struct device *dev,
144 dev_err(dev, 144 dev_err(dev,
145 "Error: Failed to allocate memory for child\n"); 145 "Error: Failed to allocate memory for child\n");
146 ret_val = -ENOMEM; 146 ret_val = -ENOMEM;
147 of_node_put(child_bus_node);
147 break; 148 break;
148 } 149 }
149 cb->bus_number = v; 150 cb->bus_number = v;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 499185eaf413..cf6312fafea5 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -514,6 +514,27 @@ static int ksz8873mll_read_status(struct phy_device *phydev)
514 return 0; 514 return 0;
515} 515}
516 516
517static int ksz9031_read_status(struct phy_device *phydev)
518{
519 int err;
520 int regval;
521
522 err = genphy_read_status(phydev);
523 if (err)
524 return err;
525
526 /* Make sure the PHY is not broken. Read idle error count,
527 * and reset the PHY if it is maxed out.
528 */
529 regval = phy_read(phydev, MII_STAT1000);
530 if ((regval & 0xFF) == 0xFF) {
531 phy_init_hw(phydev);
532 phydev->link = 0;
533 }
534
535 return 0;
536}
537
517static int ksz8873mll_config_aneg(struct phy_device *phydev) 538static int ksz8873mll_config_aneg(struct phy_device *phydev)
518{ 539{
519 return 0; 540 return 0;
@@ -772,7 +793,7 @@ static struct phy_driver ksphy_driver[] = {
772 .driver_data = &ksz9021_type, 793 .driver_data = &ksz9021_type,
773 .config_init = ksz9031_config_init, 794 .config_init = ksz9031_config_init,
774 .config_aneg = genphy_config_aneg, 795 .config_aneg = genphy_config_aneg,
775 .read_status = genphy_read_status, 796 .read_status = ksz9031_read_status,
776 .ack_interrupt = kszphy_ack_interrupt, 797 .ack_interrupt = kszphy_ack_interrupt,
777 .config_intr = kszphy_config_intr, 798 .config_intr = kszphy_config_intr,
778 .suspend = genphy_suspend, 799 .suspend = genphy_suspend,
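
The micrel hunk adds ksz9031_read_status(), which layers a sanity check on top of genphy_read_status(): if the idle-error counter in the low byte of MII_STAT1000 reads back saturated (0xFF), the PHY is treated as wedged, re-initialised with phy_init_hw(), and the link is reported down for that poll. A user-space model of just that check; the register read is stubbed and the values are illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    static int read_stat1000(bool wedged)
    {
        /* Low byte of MII_STAT1000 is the idle error counter; 0xFF means
         * it is saturated, which the patch treats as a broken PHY. */
        return wedged ? 0x40FF : 0x4002;
    }

    static void check_phy(bool wedged)
    {
        int regval = read_stat1000(wedged);
        int link = 1;

        if ((regval & 0xFF) == 0xFF) {
            printf("idle error counter maxed out: re-init PHY\n");
            link = 0;   /* report link down for this poll */
        }
        printf("stat1000=0x%04x link=%d\n", regval, link);
    }

    int main(void)
    {
        check_phy(false);
        check_phy(true);
        return 0;
    }
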
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 70b08958763a..dc2da8770918 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -43,16 +43,25 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
43 43
44static int smsc_phy_config_init(struct phy_device *phydev) 44static int smsc_phy_config_init(struct phy_device *phydev)
45{ 45{
46 int __maybe_unused len;
47 struct device *dev __maybe_unused = &phydev->dev;
48 struct device_node *of_node __maybe_unused = dev->of_node;
46 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); 49 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
50 int enable_energy = 1;
47 51
48 if (rc < 0) 52 if (rc < 0)
49 return rc; 53 return rc;
50 54
51 /* Enable energy detect mode for this SMSC transceiver */ 55 if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
52 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, 56 enable_energy = 0;
53 rc | MII_LAN83C185_EDPWRDOWN); 57
54 if (rc < 0) 58 if (enable_energy) {
55 return rc; 59 /* Enable energy detect mode for this SMSC transceiver */
60 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
61 rc | MII_LAN83C185_EDPWRDOWN);
62 if (rc < 0)
63 return rc;
64 }
56 65
57 return smsc_phy_ack_interrupt(phydev); 66 return smsc_phy_ack_interrupt(phydev);
58} 67}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ed75060da50..5e0b43283bce 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -589,7 +589,7 @@ static int pppoe_release(struct socket *sock)
589 589
590 po = pppox_sk(sk); 590 po = pppox_sk(sk);
591 591
592 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { 592 if (po->pppoe_dev) {
593 dev_put(po->pppoe_dev); 593 dev_put(po->pppoe_dev);
594 po->pppoe_dev = NULL; 594 po->pppoe_dev = NULL;
595 } 595 }
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 355842b85ee9..2a7c1be23c4f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -765,6 +765,10 @@ static const struct usb_device_id products[] = {
765 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 765 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
766 {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, 766 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
767 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 767 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
768 {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
769 {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
770 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
771 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
768 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 772 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
769 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 773 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
770 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 774 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index afdc65fd5bc5..c1587ece28cf 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2337,6 +2337,46 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2337 return 0; 2337 return 0;
2338} 2338}
2339 2339
2340static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
2341 struct ip_tunnel_info *info,
2342 __be16 sport, __be16 dport)
2343{
2344 struct vxlan_dev *vxlan = netdev_priv(dev);
2345 struct rtable *rt;
2346 struct flowi4 fl4;
2347
2348 memset(&fl4, 0, sizeof(fl4));
2349 fl4.flowi4_tos = RT_TOS(info->key.tos);
2350 fl4.flowi4_mark = skb->mark;
2351 fl4.flowi4_proto = IPPROTO_UDP;
2352 fl4.daddr = info->key.u.ipv4.dst;
2353
2354 rt = ip_route_output_key(vxlan->net, &fl4);
2355 if (IS_ERR(rt))
2356 return PTR_ERR(rt);
2357 ip_rt_put(rt);
2358
2359 info->key.u.ipv4.src = fl4.saddr;
2360 info->key.tp_src = sport;
2361 info->key.tp_dst = dport;
2362 return 0;
2363}
2364
2365static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2366{
2367 struct vxlan_dev *vxlan = netdev_priv(dev);
2368 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2369 __be16 sport, dport;
2370
2371 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2372 vxlan->cfg.port_max, true);
2373 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2374
2375 if (ip_tunnel_info_af(info) == AF_INET)
2376 return egress_ipv4_tun_info(dev, skb, info, sport, dport);
2377 return -EINVAL;
2378}
2379
2340static const struct net_device_ops vxlan_netdev_ops = { 2380static const struct net_device_ops vxlan_netdev_ops = {
2341 .ndo_init = vxlan_init, 2381 .ndo_init = vxlan_init,
2342 .ndo_uninit = vxlan_uninit, 2382 .ndo_uninit = vxlan_uninit,
@@ -2351,6 +2391,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
2351 .ndo_fdb_add = vxlan_fdb_add, 2391 .ndo_fdb_add = vxlan_fdb_add,
2352 .ndo_fdb_del = vxlan_fdb_delete, 2392 .ndo_fdb_del = vxlan_fdb_delete,
2353 .ndo_fdb_dump = vxlan_fdb_dump, 2393 .ndo_fdb_dump = vxlan_fdb_dump,
2394 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2354}; 2395};
2355 2396
2356/* Info for udev, that this is a virtual tunnel endpoint */ 2397/* Info for udev, that this is a virtual tunnel endpoint */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 6e473fa4b13c..12241b1c57cd 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -715,6 +715,7 @@ static bool check_device_tree(struct ath6kl *ar)
715 board_filename, ret); 715 board_filename, ret);
716 continue; 716 continue;
717 } 717 }
718 of_node_put(node);
718 return true; 719 return true;
719 } 720 }
720 return false; 721 return false;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f821a97d7827..6febc053a37f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1706,19 +1706,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
1706} 1706}
1707 1707
1708static int xennet_create_queues(struct netfront_info *info, 1708static int xennet_create_queues(struct netfront_info *info,
1709 unsigned int num_queues) 1709 unsigned int *num_queues)
1710{ 1710{
1711 unsigned int i; 1711 unsigned int i;
1712 int ret; 1712 int ret;
1713 1713
1714 info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), 1714 info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1715 GFP_KERNEL); 1715 GFP_KERNEL);
1716 if (!info->queues) 1716 if (!info->queues)
1717 return -ENOMEM; 1717 return -ENOMEM;
1718 1718
1719 rtnl_lock(); 1719 rtnl_lock();
1720 1720
1721 for (i = 0; i < num_queues; i++) { 1721 for (i = 0; i < *num_queues; i++) {
1722 struct netfront_queue *queue = &info->queues[i]; 1722 struct netfront_queue *queue = &info->queues[i];
1723 1723
1724 queue->id = i; 1724 queue->id = i;
@@ -1728,7 +1728,7 @@ static int xennet_create_queues(struct netfront_info *info,
1728 if (ret < 0) { 1728 if (ret < 0) {
1729 dev_warn(&info->netdev->dev, 1729 dev_warn(&info->netdev->dev,
1730 "only created %d queues\n", i); 1730 "only created %d queues\n", i);
1731 num_queues = i; 1731 *num_queues = i;
1732 break; 1732 break;
1733 } 1733 }
1734 1734
@@ -1738,11 +1738,11 @@ static int xennet_create_queues(struct netfront_info *info,
1738 napi_enable(&queue->napi); 1738 napi_enable(&queue->napi);
1739 } 1739 }
1740 1740
1741 netif_set_real_num_tx_queues(info->netdev, num_queues); 1741 netif_set_real_num_tx_queues(info->netdev, *num_queues);
1742 1742
1743 rtnl_unlock(); 1743 rtnl_unlock();
1744 1744
1745 if (num_queues == 0) { 1745 if (*num_queues == 0) {
1746 dev_err(&info->netdev->dev, "no queues\n"); 1746 dev_err(&info->netdev->dev, "no queues\n");
1747 return -EINVAL; 1747 return -EINVAL;
1748 } 1748 }
@@ -1788,7 +1788,7 @@ static int talk_to_netback(struct xenbus_device *dev,
1788 if (info->queues) 1788 if (info->queues)
1789 xennet_destroy_queues(info); 1789 xennet_destroy_queues(info);
1790 1790
1791 err = xennet_create_queues(info, num_queues); 1791 err = xennet_create_queues(info, &num_queues);
1792 if (err < 0) 1792 if (err < 0)
1793 goto destroy_ring; 1793 goto destroy_ring;
1794 1794
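
The xen-netfront hunk changes xennet_create_queues() to take unsigned int *num_queues, so that when queue setup fails after i queues the caller's copy is updated to the number actually created rather than the number requested. A minimal sketch of that in/out parameter pattern; the queue setup itself is faked here.

    #include <stdio.h>

    /* Pretend setup of queue i fails once i reaches 'working'. */
    static int create_queues(unsigned int *num_queues, unsigned int working)
    {
        unsigned int i;

        for (i = 0; i < *num_queues; i++) {
            if (i >= working) {
                *num_queues = i;    /* tell the caller how many really exist */
                break;
            }
        }
        return *num_queues == 0 ? -1 : 0;
    }

    int main(void)
    {
        unsigned int num_queues = 8;

        if (create_queues(&num_queues, 3) == 0)
            printf("using %u queues\n", num_queues);    /* prints 3 */
        else
            printf("no queues\n");
        return 0;
    }
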
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 55317fa9c9dc..0baf626da56a 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -579,22 +579,187 @@ err:
579 } 579 }
580} 580}
581 581
582static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
583 u32 rid_in)
584{
585 struct device *parent_dev;
586 struct device_node *msi_controller_node;
587 struct device_node *msi_np = *np;
588 u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle;
589 int msi_map_len;
590 bool matched;
591 u32 rid_out = rid_in;
592 const __be32 *msi_map = NULL;
593
594 /*
595 * Walk up the device parent links looking for one with a
596 * "msi-map" property.
597 */
598 for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) {
599 if (!parent_dev->of_node)
600 continue;
601
602 msi_map = of_get_property(parent_dev->of_node,
603 "msi-map", &msi_map_len);
604 if (!msi_map)
605 continue;
606
607 if (msi_map_len % (4 * sizeof(__be32))) {
608 dev_err(parent_dev, "Error: Bad msi-map length: %d\n",
609 msi_map_len);
610 return rid_out;
611 }
612 /* We have a good parent_dev and msi_map, let's use them. */
613 break;
614 }
615 if (!msi_map)
616 return rid_out;
617
618 /* The default is to select all bits. */
619 map_mask = 0xffffffff;
620
621 /*
622 * Can be overridden by "msi-map-mask" property. If
623 * of_property_read_u32() fails, the default is used.
624 */
625 of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask);
626
627 masked_rid = map_mask & rid_in;
628 matched = false;
629 while (!matched && msi_map_len >= 4 * sizeof(__be32)) {
630 rid_base = be32_to_cpup(msi_map + 0);
631 phandle = be32_to_cpup(msi_map + 1);
632 msi_base = be32_to_cpup(msi_map + 2);
633 rid_len = be32_to_cpup(msi_map + 3);
634
635 msi_controller_node = of_find_node_by_phandle(phandle);
636
637 matched = (masked_rid >= rid_base &&
638 masked_rid < rid_base + rid_len);
639 if (msi_np)
640 matched &= msi_np == msi_controller_node;
641
642 if (matched && !msi_np) {
643 *np = msi_np = msi_controller_node;
644 break;
645 }
646
647 of_node_put(msi_controller_node);
648 msi_map_len -= 4 * sizeof(__be32);
649 msi_map += 4;
650 }
651 if (!matched)
652 return rid_out;
653
654 rid_out = masked_rid + msi_base;
655 dev_dbg(dev,
656 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
657 dev_name(parent_dev), map_mask, rid_base, msi_base,
658 rid_len, rid_in, rid_out);
659
660 return rid_out;
661}
662
582/** 663/**
583 * of_msi_configure - Set the msi_domain field of a device 664 * of_msi_map_rid - Map a MSI requester ID for a device.
584 * @dev: device structure to associate with an MSI irq domain 665 * @dev: device for which the mapping is to be done.
585 * @np: device node for that device 666 * @msi_np: device node of the expected msi controller.
667 * @rid_in: unmapped MSI requester ID for the device.
668 *
669 * Walk up the device hierarchy looking for devices with a "msi-map"
670 * property. If found, apply the mapping to @rid_in.
671 *
672 * Returns the mapped MSI requester ID.
586 */ 673 */
587void of_msi_configure(struct device *dev, struct device_node *np) 674u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
675{
676 return __of_msi_map_rid(dev, &msi_np, rid_in);
677}
678
679static struct irq_domain *__of_get_msi_domain(struct device_node *np,
680 enum irq_domain_bus_token token)
681{
682 struct irq_domain *d;
683
684 d = irq_find_matching_host(np, token);
685 if (!d)
686 d = irq_find_host(np);
687
688 return d;
689}
690
691/**
692 * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
693 * @dev: device for which the mapping is to be done.
694 * @rid: Requester ID for the device.
695 *
696 * Walk up the device hierarchy looking for devices with a "msi-map"
697 * property.
698 *
699 * Returns: the MSI domain for this device (or NULL on failure)
700 */
701struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
702{
703 struct device_node *np = NULL;
704
705 __of_msi_map_rid(dev, &np, rid);
706 return __of_get_msi_domain(np, DOMAIN_BUS_PCI_MSI);
707}
708
709/**
710 * of_msi_get_domain - Use msi-parent to find the relevant MSI domain
711 * @dev: device for which the domain is requested
712 * @np: device node for @dev
713 * @token: bus type for this domain
714 *
 715 * Parses the msi-parent property (both the simple and the complex
 716 * versions) and returns the corresponding MSI domain.
717 *
718 * Returns: the MSI domain for this device (or NULL on failure).
719 */
720struct irq_domain *of_msi_get_domain(struct device *dev,
721 struct device_node *np,
722 enum irq_domain_bus_token token)
588{ 723{
589 struct device_node *msi_np; 724 struct device_node *msi_np;
590 struct irq_domain *d; 725 struct irq_domain *d;
591 726
727 /* Check for a single msi-parent property */
592 msi_np = of_parse_phandle(np, "msi-parent", 0); 728 msi_np = of_parse_phandle(np, "msi-parent", 0);
593 if (!msi_np) 729 if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
594 return; 730 d = __of_get_msi_domain(msi_np, token);
731 if (!d)
732 of_node_put(msi_np);
733 return d;
734 }
595 735
596 d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI); 736 if (token == DOMAIN_BUS_PLATFORM_MSI) {
597 if (!d) 737 /* Check for the complex msi-parent version */
598 d = irq_find_host(msi_np); 738 struct of_phandle_args args;
599 dev_set_msi_domain(dev, d); 739 int index = 0;
740
741 while (!of_parse_phandle_with_args(np, "msi-parent",
742 "#msi-cells",
743 index, &args)) {
744 d = __of_get_msi_domain(args.np, token);
745 if (d)
746 return d;
747
748 of_node_put(args.np);
749 index++;
750 }
751 }
752
753 return NULL;
754}
755
756/**
757 * of_msi_configure - Set the msi_domain field of a device
758 * @dev: device structure to associate with an MSI irq domain
759 * @np: device node for that device
760 */
761void of_msi_configure(struct device *dev, struct device_node *np)
762{
763 dev_set_msi_domain(dev,
764 of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
600} 765}
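The new __of_msi_map_rid() walk above decodes the "msi-map" property as a list of (rid-base, phandle, msi-base, length) 4-tuples, optionally pre-masking the input RID with "msi-map-mask". The standalone sketch below reproduces that table walk outside the kernel so the arithmetic is easy to follow; the example table and mask are hypothetical, and the output calculation mirrors the matching loop in the hunk above.

#include <stdint.h>
#include <stdio.h>

struct msi_map_entry {
	uint32_t rid_base;	/* first RID covered by this entry */
	uint32_t phandle;	/* MSI controller phandle (ignored here) */
	uint32_t msi_base;	/* output base, added in as in the hunk above */
	uint32_t length;	/* number of RIDs covered */
};

/* Mirror of the matching loop in __of_msi_map_rid() above */
static uint32_t map_rid(const struct msi_map_entry *map, unsigned int n,
			uint32_t mask, uint32_t rid_in)
{
	uint32_t masked = rid_in & mask;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (masked >= map[i].rid_base &&
		    masked < map[i].rid_base + map[i].length)
			return masked + map[i].msi_base;
	}
	return rid_in;	/* no match: the RID passes through unmapped */
}

int main(void)
{
	/* Hypothetical msi-map: RIDs 0x00-0xff map to specifiers at 0x10000 */
	static const struct msi_map_entry map[] = {
		{ .rid_base = 0x0, .phandle = 1, .msi_base = 0x10000, .length = 0x100 },
	};

	printf("0x%x\n", (unsigned int)map_rid(map, 1, 0xffffffff, 0x42));
	return 0;
}

With the single hypothetical entry above, an input RID of 0x42 prints 0x10042; an unmatched RID is returned unchanged, matching the early returns in the kernel function.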
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index e491681daf22..a6456b578269 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -256,7 +256,7 @@ static int xgene_allocate_domains(struct xgene_msi *msi)
256 if (!msi->inner_domain) 256 if (!msi->inner_domain)
257 return -ENOMEM; 257 return -ENOMEM;
258 258
259 msi->msi_domain = pci_msi_create_irq_domain(msi->node, 259 msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
260 &xgene_msi_domain_info, 260 &xgene_msi_domain_info,
261 msi->inner_domain); 261 msi->inner_domain);
262 262
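The pci-xgene-msi hunk above only changes the first argument of pci_msi_create_irq_domain() from a device_node to the generic fwnode handle embedded in it, obtained with of_node_to_fwnode(). The sketch below models that embedded-handle conversion with hypothetical structures; it is not the kernel's implementation, just the container/offsetof pattern the conversion relies on.

#include <stddef.h>
#include <stdio.h>

struct fw_handle {			/* generic, firmware-agnostic handle */
	int dummy;
};

struct dt_node {			/* DT-specific node embedding the handle */
	const char *name;
	struct fw_handle fwnode;
};

/* Go back from the embedded handle to the enclosing node */
#define to_dt_node(h) \
	((struct dt_node *)((char *)(h) - offsetof(struct dt_node, fwnode)))

/* Forward conversion: just take the address of the embedded member */
static struct fw_handle *dt_node_to_handle(struct dt_node *n)
{
	return n ? &n->fwnode : NULL;
}

int main(void)
{
	struct dt_node msi = { .name = "msi-controller" };
	struct fw_handle *h = dt_node_to_handle(&msi);

	printf("%s\n", to_dt_node(h)->name);	/* prints "msi-controller" */
	return 0;
}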
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4a7da3c3e035..45a51486d080 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -20,6 +20,7 @@
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/irqdomain.h> 22#include <linux/irqdomain.h>
23#include <linux/of_irq.h>
23 24
24#include "pci.h" 25#include "pci.h"
25 26
@@ -1250,8 +1251,8 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
1250} 1251}
1251 1252
1252/** 1253/**
1253 * pci_msi_create_irq_domain - Creat a MSI interrupt domain 1254 * pci_msi_create_irq_domain - Create a MSI interrupt domain
1254 * @node: Optional device-tree node of the interrupt controller 1255 * @fwnode: Optional fwnode of the interrupt controller
1255 * @info: MSI domain info 1256 * @info: MSI domain info
1256 * @parent: Parent irq domain 1257 * @parent: Parent irq domain
1257 * 1258 *
@@ -1260,7 +1261,7 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
1260 * Returns: 1261 * Returns:
1261 * A domain pointer or NULL in case of failure. 1262 * A domain pointer or NULL in case of failure.
1262 */ 1263 */
1263struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, 1264struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
1264 struct msi_domain_info *info, 1265 struct msi_domain_info *info,
1265 struct irq_domain *parent) 1266 struct irq_domain *parent)
1266{ 1267{
@@ -1271,7 +1272,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
1271 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 1272 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
1272 pci_msi_domain_update_chip_ops(info); 1273 pci_msi_domain_update_chip_ops(info);
1273 1274
1274 domain = msi_create_irq_domain(node, info, parent); 1275 domain = msi_create_irq_domain(fwnode, info, parent);
1275 if (!domain) 1276 if (!domain)
1276 return NULL; 1277 return NULL;
1277 1278
@@ -1307,14 +1308,14 @@ void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
1307 1308
1308/** 1309/**
1309 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain 1310 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
1310 * @node: Optional device-tree node of the interrupt controller 1311 * @fwnode: Optional fwnode of the interrupt controller
1311 * @info: MSI domain info 1312 * @info: MSI domain info
1312 * @parent: Parent irq domain 1313 * @parent: Parent irq domain
1313 * 1314 *
1314 * Returns: A domain pointer or NULL in case of failure. If successful 1315 * Returns: A domain pointer or NULL in case of failure. If successful
1315 * the default PCI/MSI irqdomain pointer is updated. 1316 * the default PCI/MSI irqdomain pointer is updated.
1316 */ 1317 */
1317struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, 1318struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
1318 struct msi_domain_info *info, struct irq_domain *parent) 1319 struct msi_domain_info *info, struct irq_domain *parent)
1319{ 1320{
1320 struct irq_domain *domain; 1321 struct irq_domain *domain;
@@ -1324,11 +1325,59 @@ struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
1324 pr_err("PCI: default irq domain for PCI MSI has already been created.\n"); 1325 pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
1325 domain = NULL; 1326 domain = NULL;
1326 } else { 1327 } else {
1327 domain = pci_msi_create_irq_domain(node, info, parent); 1328 domain = pci_msi_create_irq_domain(fwnode, info, parent);
1328 pci_msi_default_domain = domain; 1329 pci_msi_default_domain = domain;
1329 } 1330 }
1330 mutex_unlock(&pci_msi_domain_lock); 1331 mutex_unlock(&pci_msi_domain_lock);
1331 1332
1332 return domain; 1333 return domain;
1333} 1334}
1335
1336static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
1337{
1338 u32 *pa = data;
1339
1340 *pa = alias;
1341 return 0;
1342}
1343/**
1344 * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
1345 * @domain: The interrupt domain
1346 * @pdev: The PCI device.
1347 *
1348 * The RID for a device is formed from the alias, with a firmware
1349 * supplied mapping applied
1350 *
1351 * Returns: The RID.
1352 */
1353u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
1354{
1355 struct device_node *of_node;
1356 u32 rid = 0;
1357
1358 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
1359
1360 of_node = irq_domain_get_of_node(domain);
1361 if (of_node)
1362 rid = of_msi_map_rid(&pdev->dev, of_node, rid);
1363
1364 return rid;
1365}
1366
1367/**
1368 * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
1369 * @pdev: The PCI device
1370 *
1371 * Use the firmware data to find a device-specific MSI domain
 1372 * (i.e. not one that is set as a default).
1373 *
 1374 * Returns: The corresponding MSI domain or NULL if none has been found.
1375 */
1376struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
1377{
1378 u32 rid = 0;
1379
1380 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
1381 return of_msi_map_get_device_domain(&pdev->dev, rid);
1382}
1334#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ 1383#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
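pci_msi_domain_get_msi_rid() above derives the RID in two steps: pci_for_each_dma_alias() keeps overwriting a single u32 through get_msi_id_cb() (so the last alias visited wins), and the result is then run through the of_msi_map_rid() translation. The standalone sketch below models that composition; the alias list, the for_each_alias() iterator and the map_rid() stand-in are hypothetical.

#include <stdint.h>
#include <stdio.h>

typedef int (*alias_cb)(uint32_t alias, void *data);

/* Toy stand-in for pci_for_each_dma_alias(): walk aliases, stop on non-zero */
static int for_each_alias(const uint32_t *aliases, int n, alias_cb cb, void *data)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = cb(aliases[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

static int get_msi_id_cb(uint32_t alias, void *data)
{
	*(uint32_t *)data = alias;	/* remember the most recent alias */
	return 0;			/* keep walking */
}

static uint32_t map_rid(uint32_t rid)
{
	return rid + 0x10000;		/* stand-in for the msi-map translation */
}

int main(void)
{
	static const uint32_t aliases[] = { 0x0008, 0x0100 };	/* device alias, then bridge alias */
	uint32_t rid = 0;

	for_each_alias(aliases, 2, get_msi_id_cb, &rid);
	printf("rid 0x%x -> msi specifier 0x%x\n",
	       (unsigned int)rid, (unsigned int)map_rid(rid));
	return 0;
}

Here the last alias (0x100) wins and is then mapped to 0x10100, which is the shape of the value pci_msi_get_device_domain() hands on to of_msi_map_get_device_domain().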
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 2e99a500cb83..e112da11630e 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_irq.h>
16#include <linux/of_pci.h> 17#include <linux/of_pci.h>
17#include "pci.h" 18#include "pci.h"
18 19
@@ -64,27 +65,25 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
64struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus) 65struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
65{ 66{
66#ifdef CONFIG_IRQ_DOMAIN 67#ifdef CONFIG_IRQ_DOMAIN
67 struct device_node *np;
68 struct irq_domain *d; 68 struct irq_domain *d;
69 69
70 if (!bus->dev.of_node) 70 if (!bus->dev.of_node)
71 return NULL; 71 return NULL;
72 72
73 /* Start looking for a phandle to an MSI controller. */ 73 /* Start looking for a phandle to an MSI controller. */
74 np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0); 74 d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
75 if (d)
76 return d;
75 77
76 /* 78 /*
77 * If we don't have an msi-parent property, look for a domain 79 * If we don't have an msi-parent property, look for a domain
78 * directly attached to the host bridge. 80 * directly attached to the host bridge.
79 */ 81 */
80 if (!np) 82 d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
81 np = bus->dev.of_node;
82
83 d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
84 if (d) 83 if (d)
85 return d; 84 return d;
86 85
87 return irq_find_host(np); 86 return irq_find_host(bus->dev.of_node);
88#else 87#else
89 return NULL; 88 return NULL;
90#endif 89#endif
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 312f23a8429c..92618686604c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -216,7 +216,7 @@ static ssize_t numa_node_store(struct device *dev,
216 if (ret) 216 if (ret)
217 return ret; 217 return ret;
218 218
219 if (!node_online(node)) 219 if (node >= MAX_NUMNODES || !node_online(node))
220 return -EINVAL; 220 return -EINVAL;
221 221
222 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); 222 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
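The pci-sysfs hunk above rejects out-of-range node numbers before node_online() ever looks at the node bitmap, since testing a fixed-size bitmap with an unvalidated index reads out of bounds. A small sketch of why the order of checks matters; MAX_NODES and the online map are hypothetical stand-ins for MAX_NUMNODES and the kernel's node_online map.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 64

static unsigned long online_map = 0x3;	/* nodes 0 and 1 online */

static bool node_is_online(int node)
{
	return online_map & (1UL << node);	/* only safe for 0 <= node < MAX_NODES */
}

static int store_node(int node)
{
	/* Range check first, so the bitmap is never indexed out of bounds */
	if (node < 0 || node >= MAX_NODES || !node_is_online(node))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", store_node(1), store_node(5), store_node(1000));
	/* prints "0 -1 -1": node 1000 is rejected by the range check alone */
	return 0;
}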
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8361d27e5eca..f14a970b61fa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1622,15 +1622,48 @@ static void pci_init_capabilities(struct pci_dev *dev)
1622 pci_enable_acs(dev); 1622 pci_enable_acs(dev);
1623} 1623}
1624 1624
1625/*
1626 * This is the equivalent of pci_host_bridge_msi_domain that acts on
1627 * devices. Firmware interfaces that can select the MSI domain on a
1628 * per-device basis should be called from here.
1629 */
1630static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1631{
1632 struct irq_domain *d;
1633
1634 /*
1635 * If a domain has been set through the pcibios_add_device
1636 * callback, then this is the one (platform code knows best).
1637 */
1638 d = dev_get_msi_domain(&dev->dev);
1639 if (d)
1640 return d;
1641
1642 /*
1643 * Let's see if we have a firmware interface able to provide
1644 * the domain.
1645 */
1646 d = pci_msi_get_device_domain(dev);
1647 if (d)
1648 return d;
1649
1650 return NULL;
1651}
1652
1625static void pci_set_msi_domain(struct pci_dev *dev) 1653static void pci_set_msi_domain(struct pci_dev *dev)
1626{ 1654{
1655 struct irq_domain *d;
1656
1627 /* 1657 /*
1628 * If no domain has been set through the pcibios_add_device 1658 * If the platform or firmware interfaces cannot supply a
1629 * callback, inherit the default from the bus device. 1659 * device-specific MSI domain, then inherit the default domain
1660 * from the host bridge itself.
1630 */ 1661 */
1631 if (!dev_get_msi_domain(&dev->dev)) 1662 d = pci_dev_msi_domain(dev);
1632 dev_set_msi_domain(&dev->dev, 1663 if (!d)
1633 dev_get_msi_domain(&dev->bus->dev)); 1664 d = dev_get_msi_domain(&dev->bus->dev);
1665
1666 dev_set_msi_domain(&dev->dev, d);
1634} 1667}
1635 1668
1636void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 1669void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
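pci_set_msi_domain() above now tries a per-device domain first (whatever pcibios_add_device already set, then the firmware msi-map lookup) and only falls back to the host bridge default when neither provides one. A compact sketch of that selection order, assuming hypothetical domain objects rather than real irq_domain instances:

#include <stdio.h>

struct irq_domain {
	const char *name;
};

static struct irq_domain *platform_domain;			/* set by platform code, may be NULL */
static struct irq_domain firmware_domain = { "msi-map domain" };
static struct irq_domain bridge_default  = { "bridge default" };

/* Per-device lookup: platform callback first, then firmware interface */
static struct irq_domain *dev_msi_domain(void)
{
	if (platform_domain)
		return platform_domain;		/* platform code knows best */
	return &firmware_domain;		/* firmware-provided, may also be NULL */
}

int main(void)
{
	struct irq_domain *d = dev_msi_domain();

	if (!d)
		d = &bridge_default;		/* inherit from the host bridge */
	printf("using %s\n", d->name);		/* prints "using msi-map domain" */
	return 0;
}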
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2365a32a595e..be3755c973e9 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -823,9 +823,15 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
823 } 823 }
824 824
825 /* Now look up the logical CPU number */ 825 /* Now look up the logical CPU number */
826 for_each_possible_cpu(cpu) 826 for_each_possible_cpu(cpu) {
827 if (dn == of_cpu_device_node_get(cpu)) 827 struct device_node *cpu_dn;
828
829 cpu_dn = of_cpu_device_node_get(cpu);
830 of_node_put(cpu_dn);
831
832 if (dn == cpu_dn)
828 break; 833 break;
834 }
829 835
830 if (cpu >= nr_cpu_ids) { 836 if (cpu >= nr_cpu_ids) {
831 pr_warn("Failed to find logical CPU for %s\n", 837 pr_warn("Failed to find logical CPU for %s\n",
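The arm_pmu fix above drops the node reference taken by of_cpu_device_node_get() on every loop iteration, not only on the matching one, so repeated probes no longer leak a reference per CPU. Below is a toy refcount model of the corrected pattern; node_get() and node_put() are hypothetical stand-ins for the OF helpers.

#include <stdio.h>

struct node {
	int refcount;
};

static struct node *node_get(struct node *n)
{
	n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	n->refcount--;
}

int main(void)
{
	struct node cpus[4] = { {1}, {1}, {1}, {1} };
	struct node *wanted = &cpus[3];
	int i;

	for (i = 0; i < 4; i++) {
		struct node *n = node_get(&cpus[i]);

		node_put(n);		/* balance the get before comparing */
		if (n == wanted)
			break;
	}

	for (i = 0; i < 4; i++)
		printf("cpu%d refcount=%d\n", i, cpus[i].refcount);	/* all stay at 1 */
	return 0;
}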
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 6e0d9fa8e1d1..c7a05996d5c1 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -17,8 +17,7 @@
17#include <linux/phy/phy.h> 17#include <linux/phy/phy.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20 20#include <linux/atomic.h>
21#include <asm/cmpxchg.h>
22 21
23#define USBHS_LPSTS 0x02 22#define USBHS_LPSTS 0x02
24#define USBHS_UGCTRL 0x80 23#define USBHS_UGCTRL 0x80
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 84dd2ed47a92..b422e4ed73f4 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -67,6 +67,19 @@ config PINCTRL_AT91
67 help 67 help
68 Say Y here to enable the at91 pinctrl driver 68 Say Y here to enable the at91 pinctrl driver
69 69
70config PINCTRL_AT91PIO4
71 bool "AT91 PIO4 pinctrl driver"
72 depends on OF
73 depends on ARCH_AT91
74 select PINMUX
75 select GENERIC_PINCONF
76 select GPIOLIB
77 select GPIOLIB_IRQCHIP
78 select OF_GPIO
79 help
80 Say Y here to enable the at91 pinctrl/gpio driver for Atmel PIO4
81 controller available on sama5d2 SoC.
82
70config PINCTRL_AMD 83config PINCTRL_AMD
71 bool "AMD GPIO pin control" 84 bool "AMD GPIO pin control"
72 depends on GPIOLIB 85 depends on GPIOLIB
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index cad077c43fb7..738cb4929a49 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
12obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o 12obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
13obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o 13obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
14obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o 14obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
15obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
15obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o 16obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
16obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o 17obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
17obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o 18obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
@@ -50,6 +51,6 @@ obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
50obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/ 51obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
51obj-$(CONFIG_PLAT_SPEAR) += spear/ 52obj-$(CONFIG_PLAT_SPEAR) += spear/
52obj-$(CONFIG_ARCH_SUNXI) += sunxi/ 53obj-$(CONFIG_ARCH_SUNXI) += sunxi/
53obj-$(CONFIG_ARCH_UNIPHIER) += uniphier/ 54obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
54obj-$(CONFIG_ARCH_VT8500) += vt8500/ 55obj-$(CONFIG_ARCH_VT8500) += vt8500/
55obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ 56obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 8efa235ca1c9..a1ea565fcd46 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -330,16 +330,6 @@ static inline void bcm2835_pinctrl_fsel_set(
330 bcm2835_gpio_wr(pc, FSEL_REG(pin), val); 330 bcm2835_gpio_wr(pc, FSEL_REG(pin), val);
331} 331}
332 332
333static int bcm2835_gpio_request(struct gpio_chip *chip, unsigned offset)
334{
335 return pinctrl_request_gpio(chip->base + offset);
336}
337
338static void bcm2835_gpio_free(struct gpio_chip *chip, unsigned offset)
339{
340 pinctrl_free_gpio(chip->base + offset);
341}
342
343static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 333static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
344{ 334{
345 return pinctrl_gpio_direction_input(chip->base + offset); 335 return pinctrl_gpio_direction_input(chip->base + offset);
@@ -375,8 +365,8 @@ static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
375static struct gpio_chip bcm2835_gpio_chip = { 365static struct gpio_chip bcm2835_gpio_chip = {
376 .label = MODULE_NAME, 366 .label = MODULE_NAME,
377 .owner = THIS_MODULE, 367 .owner = THIS_MODULE,
378 .request = bcm2835_gpio_request, 368 .request = gpiochip_generic_request,
379 .free = bcm2835_gpio_free, 369 .free = gpiochip_generic_free,
380 .direction_input = bcm2835_gpio_direction_input, 370 .direction_input = bcm2835_gpio_direction_input,
381 .direction_output = bcm2835_gpio_direction_output, 371 .direction_output = bcm2835_gpio_direction_output,
382 .get = bcm2835_gpio_get, 372 .get = bcm2835_gpio_get,
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
index 1ca783098e47..12a48f498b75 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
@@ -29,7 +29,6 @@
29#include <linux/of_device.h> 29#include <linux/of_device.h>
30#include <linux/of_irq.h> 30#include <linux/of_irq.h>
31#include <linux/pinctrl/pinctrl.h> 31#include <linux/pinctrl/pinctrl.h>
32#include <linux/pinctrl/pinmux.h>
33#include <linux/pinctrl/pinconf.h> 32#include <linux/pinctrl/pinconf.h>
34#include <linux/pinctrl/pinconf-generic.h> 33#include <linux/pinctrl/pinconf-generic.h>
35 34
@@ -597,127 +596,6 @@ static const struct pinconf_ops cygnus_pconf_ops = {
597}; 596};
598 597
599/* 598/*
600 * Map a GPIO in the local gpio_chip pin space to a pin in the Cygnus IOMUX
601 * pinctrl pin space
602 */
603struct cygnus_gpio_pin_range {
604 unsigned offset;
605 unsigned pin_base;
606 unsigned num_pins;
607};
608
609#define CYGNUS_PINRANGE(o, p, n) { .offset = o, .pin_base = p, .num_pins = n }
610
611/*
612 * Pin mapping table for mapping local GPIO pins to Cygnus IOMUX pinctrl pins
613 */
614static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = {
615 CYGNUS_PINRANGE(0, 42, 1),
616 CYGNUS_PINRANGE(1, 44, 3),
617 CYGNUS_PINRANGE(4, 48, 1),
618 CYGNUS_PINRANGE(5, 50, 3),
619 CYGNUS_PINRANGE(8, 126, 1),
620 CYGNUS_PINRANGE(9, 155, 1),
621 CYGNUS_PINRANGE(10, 152, 1),
622 CYGNUS_PINRANGE(11, 154, 1),
623 CYGNUS_PINRANGE(12, 153, 1),
624 CYGNUS_PINRANGE(13, 127, 3),
625 CYGNUS_PINRANGE(16, 140, 1),
626 CYGNUS_PINRANGE(17, 145, 7),
627 CYGNUS_PINRANGE(24, 130, 10),
628 CYGNUS_PINRANGE(34, 141, 4),
629 CYGNUS_PINRANGE(38, 54, 1),
630 CYGNUS_PINRANGE(39, 56, 3),
631 CYGNUS_PINRANGE(42, 60, 3),
632 CYGNUS_PINRANGE(45, 64, 3),
633 CYGNUS_PINRANGE(48, 68, 2),
634 CYGNUS_PINRANGE(50, 84, 6),
635 CYGNUS_PINRANGE(56, 94, 6),
636 CYGNUS_PINRANGE(62, 72, 1),
637 CYGNUS_PINRANGE(63, 70, 1),
638 CYGNUS_PINRANGE(64, 80, 1),
639 CYGNUS_PINRANGE(65, 74, 3),
640 CYGNUS_PINRANGE(68, 78, 1),
641 CYGNUS_PINRANGE(69, 82, 1),
642 CYGNUS_PINRANGE(70, 156, 17),
643 CYGNUS_PINRANGE(87, 104, 12),
644 CYGNUS_PINRANGE(99, 102, 2),
645 CYGNUS_PINRANGE(101, 90, 4),
646 CYGNUS_PINRANGE(105, 116, 6),
647 CYGNUS_PINRANGE(111, 100, 2),
648 CYGNUS_PINRANGE(113, 122, 4),
649 CYGNUS_PINRANGE(123, 11, 1),
650 CYGNUS_PINRANGE(124, 38, 4),
651 CYGNUS_PINRANGE(128, 43, 1),
652 CYGNUS_PINRANGE(129, 47, 1),
653 CYGNUS_PINRANGE(130, 49, 1),
654 CYGNUS_PINRANGE(131, 53, 1),
655 CYGNUS_PINRANGE(132, 55, 1),
656 CYGNUS_PINRANGE(133, 59, 1),
657 CYGNUS_PINRANGE(134, 63, 1),
658 CYGNUS_PINRANGE(135, 67, 1),
659 CYGNUS_PINRANGE(136, 71, 1),
660 CYGNUS_PINRANGE(137, 73, 1),
661 CYGNUS_PINRANGE(138, 77, 1),
662 CYGNUS_PINRANGE(139, 79, 1),
663 CYGNUS_PINRANGE(140, 81, 1),
664 CYGNUS_PINRANGE(141, 83, 1),
665 CYGNUS_PINRANGE(142, 10, 1)
666};
667
668/*
669 * The Cygnus IOMUX controller mainly supports group based mux configuration,
670 * but certain pins can be muxed to GPIO individually. Only the ASIU GPIO
671 * controller can support this, so it's an optional configuration
672 *
673 * Return -ENODEV means no support and that's fine
674 */
675static int cygnus_gpio_pinmux_add_range(struct cygnus_gpio *chip)
676{
677 struct device_node *node = chip->dev->of_node;
678 struct device_node *pinmux_node;
679 struct platform_device *pinmux_pdev;
680 struct gpio_chip *gc = &chip->gc;
681 int i, ret = 0;
682
683 /* parse DT to find the phandle to the pinmux controller */
684 pinmux_node = of_parse_phandle(node, "pinmux", 0);
685 if (!pinmux_node)
686 return -ENODEV;
687
688 pinmux_pdev = of_find_device_by_node(pinmux_node);
689 /* no longer need the pinmux node */
690 of_node_put(pinmux_node);
691 if (!pinmux_pdev) {
692 dev_err(chip->dev, "failed to get pinmux device\n");
693 return -EINVAL;
694 }
695
696 /* now need to create the mapping between local GPIO and PINMUX pins */
697 for (i = 0; i < ARRAY_SIZE(cygnus_gpio_pintable); i++) {
698 ret = gpiochip_add_pin_range(gc, dev_name(&pinmux_pdev->dev),
699 cygnus_gpio_pintable[i].offset,
700 cygnus_gpio_pintable[i].pin_base,
701 cygnus_gpio_pintable[i].num_pins);
702 if (ret) {
703 dev_err(chip->dev, "unable to add GPIO pin range\n");
704 goto err_put_device;
705 }
706 }
707
708 chip->pinmux_is_supported = true;
709
710 /* no need for pinmux_pdev device reference anymore */
711 put_device(&pinmux_pdev->dev);
712 return 0;
713
714err_put_device:
715 put_device(&pinmux_pdev->dev);
716 gpiochip_remove_pin_ranges(gc);
717 return ret;
718}
719
720/*
721 * Cygnus GPIO controller supports some PINCONF related configurations such as 599 * Cygnus GPIO controller supports some PINCONF related configurations such as
722 * pull up, pull down, and drive strength, when the pin is configured to GPIO 600 * pull up, pull down, and drive strength, when the pin is configured to GPIO
723 * 601 *
@@ -851,18 +729,15 @@ static int cygnus_gpio_probe(struct platform_device *pdev)
851 gc->set = cygnus_gpio_set; 729 gc->set = cygnus_gpio_set;
852 gc->get = cygnus_gpio_get; 730 gc->get = cygnus_gpio_get;
853 731
732 chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
733 "gpio-ranges");
734
854 ret = gpiochip_add(gc); 735 ret = gpiochip_add(gc);
855 if (ret < 0) { 736 if (ret < 0) {
856 dev_err(dev, "unable to add GPIO chip\n"); 737 dev_err(dev, "unable to add GPIO chip\n");
857 return ret; 738 return ret;
858 } 739 }
859 740
860 ret = cygnus_gpio_pinmux_add_range(chip);
861 if (ret && ret != -ENODEV) {
862 dev_err(dev, "unable to add GPIO pin range\n");
863 goto err_rm_gpiochip;
864 }
865
866 ret = cygnus_gpio_register_pinconf(chip); 741 ret = cygnus_gpio_register_pinconf(chip);
867 if (ret) { 742 if (ret) {
868 dev_err(dev, "unable to register pinconf\n"); 743 dev_err(dev, "unable to register pinconf\n");
diff --git a/drivers/pinctrl/berlin/Kconfig b/drivers/pinctrl/berlin/Kconfig
index b18322bc7bf9..8fe6ad7795dc 100644
--- a/drivers/pinctrl/berlin/Kconfig
+++ b/drivers/pinctrl/berlin/Kconfig
@@ -1,4 +1,4 @@
1if ARCH_BERLIN 1if (ARCH_BERLIN || COMPILE_TEST)
2 2
3config PINCTRL_BERLIN 3config PINCTRL_BERLIN
4 bool 4 bool
@@ -6,15 +6,23 @@ config PINCTRL_BERLIN
6 select REGMAP_MMIO 6 select REGMAP_MMIO
7 7
8config PINCTRL_BERLIN_BG2 8config PINCTRL_BERLIN_BG2
9 bool 9 def_bool MACH_BERLIN_BG2
10 depends on OF
10 select PINCTRL_BERLIN 11 select PINCTRL_BERLIN
11 12
12config PINCTRL_BERLIN_BG2CD 13config PINCTRL_BERLIN_BG2CD
13 bool 14 def_bool MACH_BERLIN_BG2CD
15 depends on OF
14 select PINCTRL_BERLIN 16 select PINCTRL_BERLIN
15 17
16config PINCTRL_BERLIN_BG2Q 18config PINCTRL_BERLIN_BG2Q
17 bool 19 def_bool MACH_BERLIN_BG2Q
20 depends on OF
21 select PINCTRL_BERLIN
22
23config PINCTRL_BERLIN_BG4CT
24 bool "Marvell berlin4ct pin controller driver"
25 depends on OF
18 select PINCTRL_BERLIN 26 select PINCTRL_BERLIN
19 27
20endif 28endif
diff --git a/drivers/pinctrl/berlin/Makefile b/drivers/pinctrl/berlin/Makefile
index deb0c6baf316..06f94029ad66 100644
--- a/drivers/pinctrl/berlin/Makefile
+++ b/drivers/pinctrl/berlin/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_PINCTRL_BERLIN) += berlin.o
2obj-$(CONFIG_PINCTRL_BERLIN_BG2) += berlin-bg2.o 2obj-$(CONFIG_PINCTRL_BERLIN_BG2) += berlin-bg2.o
3obj-$(CONFIG_PINCTRL_BERLIN_BG2CD) += berlin-bg2cd.o 3obj-$(CONFIG_PINCTRL_BERLIN_BG2CD) += berlin-bg2cd.o
4obj-$(CONFIG_PINCTRL_BERLIN_BG2Q) += berlin-bg2q.o 4obj-$(CONFIG_PINCTRL_BERLIN_BG2Q) += berlin-bg2q.o
5obj-$(CONFIG_PINCTRL_BERLIN_BG4CT) += berlin-bg4ct.o
diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c
index 274c5535b531..fabe728ae268 100644
--- a/drivers/pinctrl/berlin/berlin-bg2.c
+++ b/drivers/pinctrl/berlin/berlin-bg2.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2014 Marvell Technology Group Ltd. 4 * Copyright (C) 2014 Marvell Technology Group Ltd.
5 * 5 *
6 * Antoine Ténart <antoine.tenart@free-electrons.com> 6 * Antoine Ténart <antoine.tenart@free-electrons.com>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
@@ -246,6 +246,6 @@ static struct platform_driver berlin2_pinctrl_driver = {
246}; 246};
247module_platform_driver(berlin2_pinctrl_driver); 247module_platform_driver(berlin2_pinctrl_driver);
248 248
249MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>"); 249MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
250MODULE_DESCRIPTION("Marvell Berlin BG2 pinctrl driver"); 250MODULE_DESCRIPTION("Marvell Berlin BG2 pinctrl driver");
251MODULE_LICENSE("GPL"); 251MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c
index 0cb793a3552a..ad8c75861373 100644
--- a/drivers/pinctrl/berlin/berlin-bg2cd.c
+++ b/drivers/pinctrl/berlin/berlin-bg2cd.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2014 Marvell Technology Group Ltd. 4 * Copyright (C) 2014 Marvell Technology Group Ltd.
5 * 5 *
6 * Antoine Ténart <antoine.tenart@free-electrons.com> 6 * Antoine Ténart <antoine.tenart@free-electrons.com>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
@@ -19,24 +19,24 @@
19 19
20static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = { 20static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
21 /* G */ 21 /* G */
22 BERLIN_PINCTRL_GROUP("G0", 0x00, 0x1, 0x00, 22 BERLIN_PINCTRL_GROUP("G0", 0x00, 0x3, 0x00,
23 BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), 23 BERLIN_PINCTRL_FUNCTION(0x0, "jtag"),
24 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), 24 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"),
25 BERLIN_PINCTRL_FUNCTION(0x2, "led"), 25 BERLIN_PINCTRL_FUNCTION(0x2, "led"),
26 BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), 26 BERLIN_PINCTRL_FUNCTION(0x3, "pwm")),
27 BERLIN_PINCTRL_GROUP("G1", 0x00, 0x2, 0x01, 27 BERLIN_PINCTRL_GROUP("G1", 0x00, 0x3, 0x03,
28 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), 28 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
29 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), 29 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
30 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), 30 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
31 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), 31 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
32 BERLIN_PINCTRL_GROUP("G2", 0x00, 0x2, 0x02, 32 BERLIN_PINCTRL_GROUP("G2", 0x00, 0x3, 0x06,
33 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), 33 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
34 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), 34 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
35 BERLIN_PINCTRL_FUNCTION(0x2, "fe"), 35 BERLIN_PINCTRL_FUNCTION(0x2, "fe"),
36 BERLIN_PINCTRL_FUNCTION(0x3, "pll"), 36 BERLIN_PINCTRL_FUNCTION(0x3, "pll"),
37 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), 37 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
38 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), 38 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
39 BERLIN_PINCTRL_GROUP("G3", 0x00, 0x2, 0x04, 39 BERLIN_PINCTRL_GROUP("G3", 0x00, 0x3, 0x09,
40 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), 40 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
41 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), 41 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
42 BERLIN_PINCTRL_FUNCTION(0x2, "twsi2"), 42 BERLIN_PINCTRL_FUNCTION(0x2, "twsi2"),
@@ -44,7 +44,7 @@ static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
44 BERLIN_PINCTRL_FUNCTION(0x4, "fe"), 44 BERLIN_PINCTRL_FUNCTION(0x4, "fe"),
45 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), 45 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
46 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), 46 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
47 BERLIN_PINCTRL_GROUP("G4", 0x00, 0x2, 0x06, 47 BERLIN_PINCTRL_GROUP("G4", 0x00, 0x3, 0x0c,
48 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), 48 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
49 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), 49 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
50 BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"), 50 BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"),
@@ -52,7 +52,7 @@ static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
52 BERLIN_PINCTRL_FUNCTION(0x4, "pwm"), 52 BERLIN_PINCTRL_FUNCTION(0x4, "pwm"),
53 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), 53 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
54 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), 54 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
55 BERLIN_PINCTRL_GROUP("G5", 0x00, 0x3, 0x08, 55 BERLIN_PINCTRL_GROUP("G5", 0x00, 0x3, 0x0f,
56 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), 56 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
57 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), 57 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"),
58 BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"), 58 BERLIN_PINCTRL_FUNCTION(0x2, "twsi3"),
@@ -60,64 +60,66 @@ static const struct berlin_desc_group berlin2cd_soc_pinctrl_groups[] = {
60 BERLIN_PINCTRL_FUNCTION(0x4, "pwm"), 60 BERLIN_PINCTRL_FUNCTION(0x4, "pwm"),
61 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"), 61 BERLIN_PINCTRL_FUNCTION(0x6, "usb0_dbg"),
62 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")), 62 BERLIN_PINCTRL_FUNCTION(0x7, "usb1_dbg")),
63 BERLIN_PINCTRL_GROUP("G6", 0x00, 0x2, 0x0b, 63 BERLIN_PINCTRL_GROUP("G6", 0x00, 0x3, 0x12,
64 BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RX/TX */ 64 BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RX/TX */
65 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), 65 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
66 BERLIN_PINCTRL_GROUP("G7", 0x00, 0x3, 0x0d, 66 BERLIN_PINCTRL_GROUP("G7", 0x00, 0x3, 0x15,
67 BERLIN_PINCTRL_FUNCTION(0x0, "eddc"), 67 BERLIN_PINCTRL_FUNCTION(0x0, "eddc"),
68 BERLIN_PINCTRL_FUNCTION(0x1, "twsi1"), 68 BERLIN_PINCTRL_FUNCTION(0x1, "twsi1"),
69 BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), 69 BERLIN_PINCTRL_FUNCTION(0x2, "gpio")),
70 BERLIN_PINCTRL_GROUP("G8", 0x00, 0x3, 0x10, 70 BERLIN_PINCTRL_GROUP("G8", 0x00, 0x3, 0x18,
71 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ 71 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
72 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), 72 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
73 BERLIN_PINCTRL_GROUP("G9", 0x00, 0x3, 0x13, 73 BERLIN_PINCTRL_GROUP("G9", 0x00, 0x3, 0x1b,
74 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), 74 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"),
75 BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS1n/SS2n */ 75 BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS1n/SS2n */
76 BERLIN_PINCTRL_FUNCTION(0x2, "twsi0")), 76 BERLIN_PINCTRL_FUNCTION(0x3, "twsi0")),
77 BERLIN_PINCTRL_GROUP("G10", 0x00, 0x2, 0x16, 77 BERLIN_PINCTRL_GROUP("G10", 0x00, 0x2, 0x1e,
78 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* CLK */ 78 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* CLK */
79 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), 79 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
80 BERLIN_PINCTRL_GROUP("G11", 0x00, 0x2, 0x18, 80 BERLIN_PINCTRL_GROUP("G11", 0x04, 0x2, 0x00,
81 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI/SDO */ 81 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI/SDO */
82 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), 82 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
83 BERLIN_PINCTRL_GROUP("G12", 0x00, 0x3, 0x1a, 83 BERLIN_PINCTRL_GROUP("G12", 0x04, 0x3, 0x02,
84 BERLIN_PINCTRL_FUNCTION(0x0, "usb1"), 84 BERLIN_PINCTRL_FUNCTION(0x0, "usb1"),
85 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), 85 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
86 BERLIN_PINCTRL_GROUP("G13", 0x04, 0x3, 0x00, 86 BERLIN_PINCTRL_GROUP("G13", 0x04, 0x3, 0x05,
87 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), 87 BERLIN_PINCTRL_FUNCTION(0x0, "nand"),
88 BERLIN_PINCTRL_FUNCTION(0x1, "usb0_dbg"), 88 BERLIN_PINCTRL_FUNCTION(0x1, "usb0_dbg"),
89 BERLIN_PINCTRL_FUNCTION(0x2, "usb1_dbg")), 89 BERLIN_PINCTRL_FUNCTION(0x2, "usb1_dbg")),
90 BERLIN_PINCTRL_GROUP("G14", 0x04, 0x1, 0x03, 90 BERLIN_PINCTRL_GROUP("G14", 0x04, 0x1, 0x08,
91 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), 91 BERLIN_PINCTRL_FUNCTION(0x0, "nand"),
92 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), 92 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
93 BERLIN_PINCTRL_GROUP("G15", 0x04, 0x2, 0x04, 93 BERLIN_PINCTRL_GROUP("G15", 0x04, 0x3, 0x09,
94 BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), 94 BERLIN_PINCTRL_FUNCTION(0x0, "jtag"),
95 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), 95 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")),
96 BERLIN_PINCTRL_GROUP("G16", 0x04, 0x3, 0x06, 96 BERLIN_PINCTRL_GROUP("G16", 0x04, 0x3, 0x0c,
97 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 97 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
98 BERLIN_PINCTRL_GROUP("G17", 0x04, 0x3, 0x09, 98 BERLIN_PINCTRL_GROUP("G17", 0x04, 0x3, 0x0f,
99 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 99 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
100 BERLIN_PINCTRL_GROUP("G18", 0x04, 0x1, 0x0c, 100 BERLIN_PINCTRL_GROUP("G18", 0x04, 0x2, 0x12,
101 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 101 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
102 BERLIN_PINCTRL_GROUP("G19", 0x04, 0x1, 0x0d, 102 BERLIN_PINCTRL_GROUP("G19", 0x04, 0x2, 0x14,
103 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 103 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
104 BERLIN_PINCTRL_GROUP("G20", 0x04, 0x1, 0x0e, 104 BERLIN_PINCTRL_GROUP("G20", 0x04, 0x2, 0x16,
105 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 105 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
106 BERLIN_PINCTRL_GROUP("G21", 0x04, 0x3, 0x0f, 106 BERLIN_PINCTRL_GROUP("G21", 0x04, 0x3, 0x18,
107 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 107 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
108 BERLIN_PINCTRL_GROUP("G22", 0x04, 0x3, 0x12, 108 BERLIN_PINCTRL_GROUP("G22", 0x04, 0x3, 0x1b,
109 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 109 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
110 BERLIN_PINCTRL_GROUP("G23", 0x04, 0x3, 0x15, 110 BERLIN_PINCTRL_GROUP("G23", 0x08, 0x3, 0x00,
111 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 111 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
112 BERLIN_PINCTRL_GROUP("G24", 0x04, 0x2, 0x18, 112 BERLIN_PINCTRL_GROUP("G24", 0x08, 0x2, 0x03,
113 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 113 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
114 BERLIN_PINCTRL_GROUP("G25", 0x04, 0x2, 0x1a, 114 BERLIN_PINCTRL_GROUP("G25", 0x08, 0x2, 0x05,
115 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 115 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
116 BERLIN_PINCTRL_GROUP("G26", 0x04, 0x1, 0x1c, 116 BERLIN_PINCTRL_GROUP("G26", 0x08, 0x1, 0x07,
117 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 117 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
118 BERLIN_PINCTRL_GROUP("G27", 0x04, 0x1, 0x1d, 118 BERLIN_PINCTRL_GROUP("G27", 0x08, 0x2, 0x08,
119 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 119 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
120 BERLIN_PINCTRL_GROUP("G28", 0x04, 0x2, 0x1e, 120 BERLIN_PINCTRL_GROUP("G28", 0x08, 0x3, 0x0a,
121 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
122 BERLIN_PINCTRL_GROUP("G29", 0x08, 0x3, 0x0d,
121 BERLIN_PINCTRL_FUNCTION_UNKNOWN), 123 BERLIN_PINCTRL_FUNCTION_UNKNOWN),
122}; 124};
123 125
@@ -189,6 +191,6 @@ static struct platform_driver berlin2cd_pinctrl_driver = {
189}; 191};
190module_platform_driver(berlin2cd_pinctrl_driver); 192module_platform_driver(berlin2cd_pinctrl_driver);
191 193
192MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>"); 194MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
193MODULE_DESCRIPTION("Marvell Berlin BG2CD pinctrl driver"); 195MODULE_DESCRIPTION("Marvell Berlin BG2CD pinctrl driver");
194MODULE_LICENSE("GPL"); 196MODULE_LICENSE("GPL");
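The reworked berlin2cd group table above packs each group's mux field back-to-back inside 32-bit registers, moving to the next register when a field no longer fits, and all of the new register/bit offsets follow from the listed field widths. The standalone sketch below recomputes those (register, bit) pairs from the widths in the hunk (for example G10 at reg 0x00 bit 0x1e and G23 at reg 0x08 bit 0x00); the packing rule is inferred from the table rather than stated by the driver.

#include <stdio.h>

int main(void)
{
	/* Mux field width in bits for G0..G29, as listed in the hunk above */
	static const int width[] = {
		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,	/* G0-G10  */
		2, 3, 3, 1, 3, 3, 3, 2, 2, 2, 3, 3,	/* G11-G22 */
		3, 2, 2, 1, 2, 3, 3,			/* G23-G29 */
	};
	unsigned int reg = 0, bit = 0;
	int i;

	for (i = 0; i < (int)(sizeof(width) / sizeof(width[0])); i++) {
		if (bit + width[i] > 32) {	/* field would straddle the register */
			reg += 4;		/* move to the next 32-bit register */
			bit = 0;
		}
		printf("G%-2d reg 0x%02x bit 0x%02x width %d bits\n",
		       i, reg, bit, width[i]);
		bit += width[i];
	}
	return 0;
}

Running it reproduces every offset in the new table, which is a quick way to sanity-check this kind of register-layout rework.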
diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c
index a466054a8206..cd171aea8ca8 100644
--- a/drivers/pinctrl/berlin/berlin-bg2q.c
+++ b/drivers/pinctrl/berlin/berlin-bg2q.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2014 Marvell Technology Group Ltd. 4 * Copyright (C) 2014 Marvell Technology Group Ltd.
5 * 5 *
6 * Antoine Ténart <antoine.tenart@free-electrons.com> 6 * Antoine Ténart <antoine.tenart@free-electrons.com>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
@@ -408,6 +408,6 @@ static struct platform_driver berlin2q_pinctrl_driver = {
408}; 408};
409module_platform_driver(berlin2q_pinctrl_driver); 409module_platform_driver(berlin2q_pinctrl_driver);
410 410
411MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>"); 411MODULE_AUTHOR("Antoine Ténart <antoine.tenart@free-electrons.com>");
412MODULE_DESCRIPTION("Marvell Berlin BG2Q pinctrl driver"); 412MODULE_DESCRIPTION("Marvell Berlin BG2Q pinctrl driver");
413MODULE_LICENSE("GPL"); 413MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
new file mode 100644
index 000000000000..09172043d589
--- /dev/null
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -0,0 +1,503 @@
1/*
2 * Marvell berlin4ct pinctrl driver
3 *
4 * Copyright (C) 2015 Marvell Technology Group Ltd.
5 *
6 * Author: Jisheng Zhang <jszhang@marvell.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/module.h>
22#include <linux/of_device.h>
23#include <linux/platform_device.h>
24#include <linux/regmap.h>
25
26#include "berlin.h"
27
28static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
29 BERLIN_PINCTRL_GROUP("EMMC_RSTn", 0x0, 0x3, 0x00,
30 BERLIN_PINCTRL_FUNCTION(0x0, "emmc"), /* RSTn */
31 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO47 */
32 BERLIN_PINCTRL_GROUP("NAND_IO0", 0x0, 0x3, 0x03,
33 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO0 */
34 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD0 */
35 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* CLK */
36 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO0 */
37 BERLIN_PINCTRL_GROUP("NAND_IO1", 0x0, 0x3, 0x06,
38 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO1 */
39 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD1 */
40 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* CDn */
41 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO1 */
42 BERLIN_PINCTRL_GROUP("NAND_IO2", 0x0, 0x3, 0x09,
43 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO2 */
44 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD2 */
45 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT0 */
46 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO2 */
47 BERLIN_PINCTRL_GROUP("NAND_IO3", 0x0, 0x3, 0x0c,
48 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO3 */
49 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXD3 */
50 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT1 */
51 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO3 */
52 BERLIN_PINCTRL_GROUP("NAND_IO4", 0x0, 0x3, 0x0f,
53 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO4 */
54 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXC */
55 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT2 */
56 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO4 */
57 BERLIN_PINCTRL_GROUP("NAND_IO5", 0x0, 0x3, 0x12,
58 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO5 */
59 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* RXCTL */
60 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* DAT3 */
61 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO5 */
62 BERLIN_PINCTRL_GROUP("NAND_IO6", 0x0, 0x3, 0x15,
63 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO6 */
64 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* MDC */
65 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* CMD */
66 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO6 */
67 BERLIN_PINCTRL_GROUP("NAND_IO7", 0x0, 0x3, 0x18,
68 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO7 */
69 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* MDIO */
70 BERLIN_PINCTRL_FUNCTION(0x2, "sd1"), /* WP */
71 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO7 */
72 BERLIN_PINCTRL_GROUP("NAND_ALE", 0x0, 0x3, 0x1b,
73 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* ALE */
74 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD0 */
75 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO8 */
76 BERLIN_PINCTRL_GROUP("NAND_CLE", 0x4, 0x3, 0x00,
77 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CLE */
78 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD1 */
79 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO9 */
80 BERLIN_PINCTRL_GROUP("NAND_WEn", 0x4, 0x3, 0x03,
81 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WEn */
82 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD2 */
83 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO10 */
84 BERLIN_PINCTRL_GROUP("NAND_REn", 0x4, 0x3, 0x06,
85 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* REn */
86 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXD3 */
87 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO11 */
88 BERLIN_PINCTRL_GROUP("NAND_WPn", 0x4, 0x3, 0x09,
89 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WPn */
90 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO12 */
91 BERLIN_PINCTRL_GROUP("NAND_CEn", 0x4, 0x3, 0x0c,
92 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CEn */
93 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXC */
94 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO13 */
95 BERLIN_PINCTRL_GROUP("NAND_RDY", 0x4, 0x3, 0x0f,
96 BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* RDY */
97 BERLIN_PINCTRL_FUNCTION(0x1, "rgmii"), /* TXCTL */
98 BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO14 */
99 BERLIN_PINCTRL_GROUP("SD0_CLK", 0x4, 0x3, 0x12,
100 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO29 */
101 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CLK*/
102 BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* CLK */
103 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG8 */
104 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG8 */
105 BERLIN_PINCTRL_GROUP("SD0_DAT0", 0x4, 0x3, 0x15,
106 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO30 */
107 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT0 */
108 BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* SOP */
109 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG9 */
110 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG9 */
111 BERLIN_PINCTRL_GROUP("SD0_DAT1", 0x4, 0x3, 0x18,
112 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO31 */
113 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT1 */
114 BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* SD */
115 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG10 */
116 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG10 */
117 BERLIN_PINCTRL_GROUP("SD0_DAT2", 0x4, 0x3, 0x1b,
118 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO32 */
119 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT2 */
120 BERLIN_PINCTRL_FUNCTION(0x2, "sts4"), /* VALD */
121 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG11 */
122 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG11 */
123 BERLIN_PINCTRL_GROUP("SD0_DAT3", 0x8, 0x3, 0x00,
124 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO33 */
125 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT3 */
126 BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* CLK */
127 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG12 */
128 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG12 */
129 BERLIN_PINCTRL_GROUP("SD0_CDn", 0x8, 0x3, 0x03,
130 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO34 */
131 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CDn */
132 BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* SOP */
133 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG13 */
134 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG13 */
135 BERLIN_PINCTRL_GROUP("SD0_CMD", 0x8, 0x3, 0x06,
136 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO35 */
137 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CMD */
138 BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* SD */
139 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG14 */
140 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG14 */
141 BERLIN_PINCTRL_GROUP("SD0_WP", 0x8, 0x3, 0x09,
142 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO36 */
143 BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* WP */
144 BERLIN_PINCTRL_FUNCTION(0x2, "sts5"), /* VALD */
145 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG15 */
146 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG15 */
147 BERLIN_PINCTRL_GROUP("STS0_CLK", 0x8, 0x3, 0x0c,
148 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO21 */
149 BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* CLK */
150 BERLIN_PINCTRL_FUNCTION(0x2, "cpupll"), /* CLKO */
151 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG0 */
152 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG0 */
153 BERLIN_PINCTRL_GROUP("STS0_SOP", 0x8, 0x3, 0x0f,
154 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO22 */
155 BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* SOP */
156 BERLIN_PINCTRL_FUNCTION(0x2, "syspll"), /* CLKO */
157 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG1 */
158 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG1 */
159 BERLIN_PINCTRL_GROUP("STS0_SD", 0x8, 0x3, 0x12,
160 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO23 */
161 BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* SD */
162 BERLIN_PINCTRL_FUNCTION(0x2, "mempll"), /* CLKO */
163 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG2 */
164 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG2 */
165 BERLIN_PINCTRL_GROUP("STS0_VALD", 0x8, 0x3, 0x15,
166 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO24 */
167 BERLIN_PINCTRL_FUNCTION(0x1, "sts0"), /* VALD */
168 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG3 */
169 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG3 */
170 BERLIN_PINCTRL_GROUP("STS1_CLK", 0x8, 0x3, 0x18,
171 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO25 */
172 BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* CLK */
173 BERLIN_PINCTRL_FUNCTION(0x2, "pwm0"),
174 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG4 */
175 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG4 */
176 BERLIN_PINCTRL_GROUP("STS1_SOP", 0x8, 0x3, 0x1b,
177 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO26 */
178 BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* SOP */
179 BERLIN_PINCTRL_FUNCTION(0x2, "pwm1"),
180 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG5 */
181 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG5 */
182 BERLIN_PINCTRL_GROUP("STS1_SD", 0xc, 0x3, 0x00,
183 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO27 */
184 BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* SD */
185 BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"),
186 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG6 */
187 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG6 */
188 BERLIN_PINCTRL_GROUP("STS1_VALD", 0xc, 0x3, 0x03,
189 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO28 */
190 BERLIN_PINCTRL_FUNCTION(0x1, "sts1"), /* VALD */
191 BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"),
192 BERLIN_PINCTRL_FUNCTION(0x5, "v4g"), /* DBG7 */
193 BERLIN_PINCTRL_FUNCTION(0x7, "phy")), /* DBG7 */
194 BERLIN_PINCTRL_GROUP("SCRD0_RST", 0xc, 0x3, 0x06,
195 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO15 */
196 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* RST */
197 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* CLK */
198 BERLIN_PINCTRL_GROUP("SCRD0_DCLK", 0xc, 0x3, 0x09,
199 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO16 */
200 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* DCLK */
201 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* CMD */
202 BERLIN_PINCTRL_GROUP("SCRD0_GPIO0", 0xc, 0x3, 0x0c,
203 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO17 */
204 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* SCRD0 GPIO0 */
205 BERLIN_PINCTRL_FUNCTION(0x2, "sif"), /* DIO */
206 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT0 */
207 BERLIN_PINCTRL_GROUP("SCRD0_GPIO1", 0xc, 0x3, 0x0f,
208 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO18 */
209 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* SCRD0 GPIO1 */
210 BERLIN_PINCTRL_FUNCTION(0x2, "sif"), /* CLK */
211 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT1 */
212 BERLIN_PINCTRL_GROUP("SCRD0_DIO", 0xc, 0x3, 0x12,
213 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO19 */
214 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* DIO */
215 BERLIN_PINCTRL_FUNCTION(0x2, "sif"), /* DEN */
216 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT2 */
217 BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
218 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
219 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
220 BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
221 BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
222 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
223 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
224 BERLIN_PINCTRL_FUNCTION(0x2, "sts2")), /* CLK */
225 BERLIN_PINCTRL_GROUP("SPI1_SS1n", 0xc, 0x3, 0x1b,
226 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS1n */
227 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO38 */
228 BERLIN_PINCTRL_FUNCTION(0x2, "sts2"), /* SOP */
229 BERLIN_PINCTRL_FUNCTION(0x4, "pwm1")),
230 BERLIN_PINCTRL_GROUP("SPI1_SS2n", 0x10, 0x3, 0x00,
231 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS2n */
232 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO39 */
233 BERLIN_PINCTRL_FUNCTION(0x2, "sts2"), /* SD */
234 BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
235 BERLIN_PINCTRL_GROUP("SPI1_SS3n", 0x10, 0x3, 0x03,
236 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS3n */
237 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO40 */
238 BERLIN_PINCTRL_FUNCTION(0x2, "sts2")), /* VALD */
239 BERLIN_PINCTRL_GROUP("SPI1_SCLK", 0x10, 0x3, 0x06,
240 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SCLK */
241 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO41 */
242 BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* CLK */
243 BERLIN_PINCTRL_GROUP("SPI1_SDO", 0x10, 0x3, 0x09,
244 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDO */
245 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO42 */
246 BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* SOP */
247 BERLIN_PINCTRL_GROUP("SPI1_SDI", 0x10, 0x3, 0x0c,
248 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI */
249 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO43 */
250 BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* SD */
251 BERLIN_PINCTRL_GROUP("USB0_DRV_VBUS", 0x10, 0x3, 0x0f,
252 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO44 */
253 BERLIN_PINCTRL_FUNCTION(0x1, "usb0"), /* VBUS */
254 BERLIN_PINCTRL_FUNCTION(0x2, "sts3")), /* VALD */
255 BERLIN_PINCTRL_GROUP("TW0_SCL", 0x10, 0x3, 0x12,
256 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO45 */
257 BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SCL */
258 BERLIN_PINCTRL_GROUP("TW0_SDA", 0x10, 0x3, 0x15,
259 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO46 */
260 BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SDA */
261};
262
263static const struct berlin_desc_group berlin4ct_avio_pinctrl_groups[] = {
264 BERLIN_PINCTRL_GROUP("TX_EDDC_SCL", 0x0, 0x3, 0x00,
265 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO0 */
266 BERLIN_PINCTRL_FUNCTION(0x1, "tx_eddc"), /* SCL */
267 BERLIN_PINCTRL_FUNCTION(0x2, "tw1")), /* SCL */
268 BERLIN_PINCTRL_GROUP("TX_EDDC_SDA", 0x0, 0x3, 0x03,
269 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO1 */
270 BERLIN_PINCTRL_FUNCTION(0x1, "tx_eddc"), /* SDA */
271 BERLIN_PINCTRL_FUNCTION(0x2, "tw1")), /* SDA */
272 BERLIN_PINCTRL_GROUP("I2S1_LRCKO", 0x0, 0x3, 0x06,
273 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO2 */
274 BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* LRCKO */
275 BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* CLK */
276 BERLIN_PINCTRL_FUNCTION(0x4, "adac"), /* DBG0 */
277 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* CLK */
278 BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG0 */
279 BERLIN_PINCTRL_GROUP("I2S1_BCLKO", 0x0, 0x3, 0x09,
280 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO3 */
281 BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* BCLKO */
282 BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* SOP */
283 BERLIN_PINCTRL_FUNCTION(0x4, "adac"), /* DBG1 */
284 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* CMD */
285 BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG1 */
286 BERLIN_PINCTRL_GROUP("I2S1_DO", 0x0, 0x3, 0x0c,
287 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO4 */
288 BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO */
289 BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* SD */
290 BERLIN_PINCTRL_FUNCTION(0x4, "adac"), /* DBG2 */
291 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* DAT0 */
292 BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG2 */
293 BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x0f,
294 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO5 */
295 BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* MCLK */
296 BERLIN_PINCTRL_FUNCTION(0x3, "sts6"), /* VALD */
297 BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* MCLK */
298 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b"), /* DAT1 */
299 BERLIN_PINCTRL_FUNCTION(0x7, "avio")), /* DBG3 */
300 BERLIN_PINCTRL_GROUP("SPDIFO", 0x0, 0x3, 0x12,
301 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO6 */
302 BERLIN_PINCTRL_FUNCTION(0x1, "spdifo"),
303 BERLIN_PINCTRL_FUNCTION(0x2, "avpll"), /* CLKO */
304 BERLIN_PINCTRL_FUNCTION(0x4, "adac")), /* DBG3 */
305 BERLIN_PINCTRL_GROUP("I2S2_MCLK", 0x0, 0x3, 0x15,
306 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO7 */
307 BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* MCLK */
308 BERLIN_PINCTRL_FUNCTION(0x4, "hdmi"), /* FBCLK */
309 BERLIN_PINCTRL_FUNCTION(0x5, "pdm")), /* CLKO */
310 BERLIN_PINCTRL_GROUP("I2S2_LRCKI", 0x0, 0x3, 0x18,
311 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO8 */
312 BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* LRCKI */
313 BERLIN_PINCTRL_FUNCTION(0x2, "pwm0"),
314 BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* CLK */
315 BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* LRCK */
316 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* DAT2 */
317 BERLIN_PINCTRL_GROUP("I2S2_BCLKI", 0x0, 0x3, 0x1b,
318 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO9 */
319 BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* BCLKI */
320 BERLIN_PINCTRL_FUNCTION(0x2, "pwm1"),
321 BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* SOP */
322 BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* BCLK */
323 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* DAT3 */
324 BERLIN_PINCTRL_GROUP("I2S2_DI0", 0x4, 0x3, 0x00,
325 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO10 */
326 BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI0 */
327 BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"),
328 BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* SD */
329 BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* SDIN */
330 BERLIN_PINCTRL_FUNCTION(0x5, "pdm"), /* DI0 */
331 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* CDn */
332 BERLIN_PINCTRL_GROUP("I2S2_DI1", 0x4, 0x3, 0x03,
333 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* AVIO GPIO11 */
334 BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI1 */
335 BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"),
336 BERLIN_PINCTRL_FUNCTION(0x3, "sts7"), /* VALD */
337 BERLIN_PINCTRL_FUNCTION(0x4, "adac_test"), /* PWMCLK */
338 BERLIN_PINCTRL_FUNCTION(0x5, "pdm"), /* DI1 */
339 BERLIN_PINCTRL_FUNCTION(0x6, "sd1b")), /* WP */
340};
341
342static const struct berlin_desc_group berlin4ct_sysmgr_pinctrl_groups[] = {
343 BERLIN_PINCTRL_GROUP("SM_TW2_SCL", 0x0, 0x3, 0x00,
344 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO19 */
345 BERLIN_PINCTRL_FUNCTION(0x1, "tw2")), /* SCL */
346 BERLIN_PINCTRL_GROUP("SM_TW2_SDA", 0x0, 0x3, 0x03,
347 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO20 */
348 BERLIN_PINCTRL_FUNCTION(0x1, "tw2")), /* SDA */
349 BERLIN_PINCTRL_GROUP("SM_TW3_SCL", 0x0, 0x3, 0x06,
350 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO21 */
351 BERLIN_PINCTRL_FUNCTION(0x1, "tw3")), /* SCL */
352 BERLIN_PINCTRL_GROUP("SM_TW3_SDA", 0x0, 0x3, 0x09,
353 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO22 */
354 BERLIN_PINCTRL_FUNCTION(0x1, "tw3")), /* SDA */
355 BERLIN_PINCTRL_GROUP("SM_TMS", 0x0, 0x3, 0x0c,
356 BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TMS */
357 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* SM GPIO0 */
358 BERLIN_PINCTRL_FUNCTION(0x2, "pwm0")),
359 BERLIN_PINCTRL_GROUP("SM_TDI", 0x0, 0x3, 0x0f,
360 BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDI */
361 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* SM GPIO1 */
362 BERLIN_PINCTRL_FUNCTION(0x2, "pwm1")),
363 BERLIN_PINCTRL_GROUP("SM_TDO", 0x0, 0x3, 0x12,
364 BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDO */
365 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO2 */
366 BERLIN_PINCTRL_GROUP("SM_URT0_TXD", 0x0, 0x3, 0x15,
367 BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* TXD */
368 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO3 */
369 BERLIN_PINCTRL_GROUP("SM_URT0_RXD", 0x0, 0x3, 0x18,
370 BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RXD */
371 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO4 */
372 BERLIN_PINCTRL_GROUP("SM_URT1_TXD", 0x0, 0x3, 0x1b,
373 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO5 */
374 BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* TXD */
375 BERLIN_PINCTRL_FUNCTION(0x2, "eth1"), /* RXCLK */
376 BERLIN_PINCTRL_FUNCTION(0x3, "pwm2"),
377 BERLIN_PINCTRL_FUNCTION(0x4, "timer0"),
378 BERLIN_PINCTRL_FUNCTION(0x5, "clk_25m")),
379 BERLIN_PINCTRL_GROUP("SM_URT1_RXD", 0x4, 0x3, 0x00,
380 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO6 */
381 BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RXD */
382 BERLIN_PINCTRL_FUNCTION(0x3, "pwm3"),
383 BERLIN_PINCTRL_FUNCTION(0x4, "timer1")),
384 BERLIN_PINCTRL_GROUP("SM_SPI2_SS0n", 0x4, 0x3, 0x03,
385 BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SS0 n*/
386 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO7 */
387 BERLIN_PINCTRL_GROUP("SM_SPI2_SS1n", 0x4, 0x3, 0x06,
388 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO8 */
389 BERLIN_PINCTRL_FUNCTION(0x1, "spi2")), /* SS1n */
390 BERLIN_PINCTRL_GROUP("SM_SPI2_SS2n", 0x4, 0x3, 0x09,
391 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO9 */
392 BERLIN_PINCTRL_FUNCTION(0x1, "spi2"), /* SS2n */
393 BERLIN_PINCTRL_FUNCTION(0x2, "eth1"), /* MDC */
394 BERLIN_PINCTRL_FUNCTION(0x3, "pwm0"),
395 BERLIN_PINCTRL_FUNCTION(0x4, "timer0"),
396 BERLIN_PINCTRL_FUNCTION(0x5, "clk_25m")),
397 BERLIN_PINCTRL_GROUP("SM_SPI2_SS3n", 0x4, 0x3, 0x0c,
398 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO10 */
399 BERLIN_PINCTRL_FUNCTION(0x1, "spi2"), /* SS3n */
400 BERLIN_PINCTRL_FUNCTION(0x2, "eth1"), /* MDIO */
401 BERLIN_PINCTRL_FUNCTION(0x3, "pwm1"),
402 BERLIN_PINCTRL_FUNCTION(0x4, "timer1")),
403 BERLIN_PINCTRL_GROUP("SM_SPI2_SDO", 0x4, 0x3, 0x0f,
404 BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SDO */
405 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO11 */
406 BERLIN_PINCTRL_GROUP("SM_SPI2_SDI", 0x4, 0x3, 0x12,
407 BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SDI */
408 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO12 */
409 BERLIN_PINCTRL_GROUP("SM_SPI2_SCLK", 0x4, 0x3, 0x15,
410 BERLIN_PINCTRL_FUNCTION(0x0, "spi2"), /* SCLK */
411 BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* SM GPIO13 */
412 BERLIN_PINCTRL_GROUP("SM_FE_LED0", 0x4, 0x3, 0x18,
413 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO14 */
414 BERLIN_PINCTRL_FUNCTION(0x2, "led")), /* LED0 */
415 BERLIN_PINCTRL_GROUP("SM_FE_LED1", 0x4, 0x3, 0x1b,
416 BERLIN_PINCTRL_FUNCTION(0x0, "pwr"),
417 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* SM GPIO 15 */
418 BERLIN_PINCTRL_FUNCTION(0x2, "led")), /* LED1 */
419 BERLIN_PINCTRL_GROUP("SM_FE_LED2", 0x8, 0x3, 0x00,
420 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO16 */
421 BERLIN_PINCTRL_FUNCTION(0x2, "led")), /* LED2 */
422 BERLIN_PINCTRL_GROUP("SM_HDMI_HPD", 0x8, 0x3, 0x03,
423 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO17 */
424 BERLIN_PINCTRL_FUNCTION(0x1, "hdmi")), /* HPD */
425 BERLIN_PINCTRL_GROUP("SM_HDMI_CEC", 0x8, 0x3, 0x06,
426 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* SM GPIO18 */
427 BERLIN_PINCTRL_FUNCTION(0x1, "hdmi")), /* CEC */
428};
429
430static const struct berlin_pinctrl_desc berlin4ct_soc_pinctrl_data = {
431 .groups = berlin4ct_soc_pinctrl_groups,
432 .ngroups = ARRAY_SIZE(berlin4ct_soc_pinctrl_groups),
433};
434
435static const struct berlin_pinctrl_desc berlin4ct_avio_pinctrl_data = {
436 .groups = berlin4ct_avio_pinctrl_groups,
437 .ngroups = ARRAY_SIZE(berlin4ct_avio_pinctrl_groups),
438};
439
440static const struct berlin_pinctrl_desc berlin4ct_sysmgr_pinctrl_data = {
441 .groups = berlin4ct_sysmgr_pinctrl_groups,
442 .ngroups = ARRAY_SIZE(berlin4ct_sysmgr_pinctrl_groups),
443};
444
445static const struct of_device_id berlin4ct_pinctrl_match[] = {
446 {
447 .compatible = "marvell,berlin4ct-soc-pinctrl",
448 .data = &berlin4ct_soc_pinctrl_data,
449 },
450 {
451 .compatible = "marvell,berlin4ct-avio-pinctrl",
452 .data = &berlin4ct_avio_pinctrl_data,
453 },
454 {
455 .compatible = "marvell,berlin4ct-system-pinctrl",
456 .data = &berlin4ct_sysmgr_pinctrl_data,
457 },
458 {}
459};
460MODULE_DEVICE_TABLE(of, berlin4ct_pinctrl_match);
461
462static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
463{
464 const struct of_device_id *match =
465 of_match_device(berlin4ct_pinctrl_match, &pdev->dev);
466 struct regmap_config *rmconfig;
467 struct regmap *regmap;
468 struct resource *res;
469 void __iomem *base;
470
471 rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL);
472 if (!rmconfig)
473 return -ENOMEM;
474
475 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
476 base = devm_ioremap_resource(&pdev->dev, res);
477 if (IS_ERR(base))
478 return PTR_ERR(base);
479
480 rmconfig->reg_bits = 32;
481 rmconfig->val_bits = 32;
482 rmconfig->reg_stride = 4;
483 rmconfig->max_register = resource_size(res);
484
485 regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig);
486 if (IS_ERR(regmap))
487 return PTR_ERR(regmap);
488
489 return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
490}
491
492static struct platform_driver berlin4ct_pinctrl_driver = {
493 .probe = berlin4ct_pinctrl_probe,
494 .driver = {
495 .name = "berlin4ct-pinctrl",
496 .of_match_table = berlin4ct_pinctrl_match,
497 },
498};
499module_platform_driver(berlin4ct_pinctrl_driver);
500
501MODULE_AUTHOR("Jisheng Zhang <jszhang@marvell.com>");
502MODULE_DESCRIPTION("Marvell berlin4ct pinctrl driver");
503MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index f49580617055..46f2b4818da3 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2014 Marvell Technology Group Ltd. 4 * Copyright (C) 2014 Marvell Technology Group Ltd.
5 * 5 *
6 * Antoine Ténart <antoine.tenart@free-electrons.com> 6 * Antoine Ténart <antoine.tenart@free-electrons.com>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
@@ -292,20 +292,14 @@ static struct pinctrl_desc berlin_pctrl_desc = {
292 .owner = THIS_MODULE, 292 .owner = THIS_MODULE,
293}; 293};
294 294
295int berlin_pinctrl_probe(struct platform_device *pdev, 295int berlin_pinctrl_probe_regmap(struct platform_device *pdev,
296 const struct berlin_pinctrl_desc *desc) 296 const struct berlin_pinctrl_desc *desc,
297 struct regmap *regmap)
297{ 298{
298 struct device *dev = &pdev->dev; 299 struct device *dev = &pdev->dev;
299 struct device_node *parent_np = of_get_parent(dev->of_node);
300 struct berlin_pinctrl *pctrl; 300 struct berlin_pinctrl *pctrl;
301 struct regmap *regmap;
302 int ret; 301 int ret;
303 302
304 regmap = syscon_node_to_regmap(parent_np);
305 of_node_put(parent_np);
306 if (IS_ERR(regmap))
307 return PTR_ERR(regmap);
308
309 pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); 303 pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
310 if (!pctrl) 304 if (!pctrl)
311 return -ENOMEM; 305 return -ENOMEM;
@@ -330,3 +324,17 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
330 324
331 return 0; 325 return 0;
332} 326}
327
328int berlin_pinctrl_probe(struct platform_device *pdev,
329 const struct berlin_pinctrl_desc *desc)
330{
331 struct device *dev = &pdev->dev;
332 struct device_node *parent_np = of_get_parent(dev->of_node);
333 struct regmap *regmap = syscon_node_to_regmap(parent_np);
334
335 of_node_put(parent_np);
336 if (IS_ERR(regmap))
337 return PTR_ERR(regmap);
338
339 return berlin_pinctrl_probe_regmap(pdev, desc, regmap);
340}
diff --git a/drivers/pinctrl/berlin/berlin.h b/drivers/pinctrl/berlin/berlin.h
index e1aa84145194..e9b30f95b03e 100644
--- a/drivers/pinctrl/berlin/berlin.h
+++ b/drivers/pinctrl/berlin/berlin.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2014 Marvell Technology Group Ltd. 4 * Copyright (C) 2014 Marvell Technology Group Ltd.
5 * 5 *
6 * Antoine Ténart <antoine.tenart@free-electrons.com> 6 * Antoine Ténart <antoine.tenart@free-electrons.com>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
@@ -58,4 +58,8 @@ struct berlin_pinctrl_function {
58int berlin_pinctrl_probe(struct platform_device *pdev, 58int berlin_pinctrl_probe(struct platform_device *pdev,
59 const struct berlin_pinctrl_desc *desc); 59 const struct berlin_pinctrl_desc *desc);
60 60
61int berlin_pinctrl_probe_regmap(struct platform_device *pdev,
62 const struct berlin_pinctrl_desc *desc,
63 struct regmap *regmap);
64
61#endif /* __PINCTRL_BERLIN_H */ 65#endif /* __PINCTRL_BERLIN_H */
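The header change above exports a second entry point so that Berlin SoC drivers whose pinmux registers sit in a plain MMIO block (rather than under a syscon parent) can create their own regmap and hand it to the shared core. A minimal caller might look as follows; this is only a sketch with hypothetical "foo" names, essentially mirroring the berlin4ct probe earlier in this patch.

/* Sketch only: the "foo" identifiers are hypothetical; foo_pinctrl_data
 * would be the SoC's struct berlin_pinctrl_desc. */
static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
};

static int foo_pinctrl_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct regmap *regmap;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Hand the caller-created regmap to the shared Berlin core. */
	return berlin_pinctrl_probe_regmap(pdev, &foo_pinctrl_data, regmap);
}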
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 9638a00c67c2..2686a4450dfc 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1240,6 +1240,38 @@ int pinctrl_force_default(struct pinctrl_dev *pctldev)
1240} 1240}
1241EXPORT_SYMBOL_GPL(pinctrl_force_default); 1241EXPORT_SYMBOL_GPL(pinctrl_force_default);
1242 1242
1243/**
1244 * pinctrl_init_done() - tell pinctrl that a device's probe is done
1245 *
1246 * We'll use this time to switch the pins from "init" to "default" unless the
1247 * driver selected some other state.
1248 *
1249 * @dev: device that's done probing
1250 */
1251int pinctrl_init_done(struct device *dev)
1252{
1253 struct dev_pin_info *pins = dev->pins;
1254 int ret;
1255
1256 if (!pins)
1257 return 0;
1258
1259 if (IS_ERR(pins->init_state))
1260 return 0; /* No such state */
1261
1262 if (pins->p->state != pins->init_state)
1263 return 0; /* Not at init anyway */
1264
1265 if (IS_ERR(pins->default_state))
1266 return 0; /* No default state */
1267
1268 ret = pinctrl_select_state(pins->p, pins->default_state);
1269 if (ret)
1270 dev_err(dev, "failed to activate default pinctrl state\n");
1271
1272 return ret;
1273}
1274
1243#ifdef CONFIG_PM 1275#ifdef CONFIG_PM
1244 1276
1245/** 1277/**
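The pinctrl_init_done() helper added above has no caller in this hunk; it is meant to be invoked by the driver core once a device's probe routine has returned. The following is a hedged sketch of that hand-off, with a made-up example_do_probe() wrapper standing in for the real (out-of-patch) call site.

/*
 * Hypothetical sketch: move a device's pins from the "init" state to
 * "default" once probing has finished, unless the driver already
 * selected another state. example_do_probe() is invented for
 * illustration and is not part of this patch.
 */
static int example_do_probe(struct device *dev)
{
	int ret;

	ret = dev->driver->probe(dev);	/* bind the device */
	if (ret)
		return ret;

	return pinctrl_init_done(dev);
}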
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index d7b98ba36825..a5bb93987378 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/of_device.h> 20#include <linux/of_device.h>
21#include <linux/of_address.h>
21#include <linux/pinctrl/machine.h> 22#include <linux/pinctrl/machine.h>
22#include <linux/pinctrl/pinconf.h> 23#include <linux/pinctrl/pinconf.h>
23#include <linux/pinctrl/pinctrl.h> 24#include <linux/pinctrl/pinctrl.h>
@@ -39,6 +40,7 @@ struct imx_pinctrl {
39 struct device *dev; 40 struct device *dev;
40 struct pinctrl_dev *pctl; 41 struct pinctrl_dev *pctl;
41 void __iomem *base; 42 void __iomem *base;
43 void __iomem *input_sel_base;
42 const struct imx_pinctrl_soc_info *info; 44 const struct imx_pinctrl_soc_info *info;
43}; 45};
44 46
@@ -254,7 +256,12 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
254 * Regular select input register can never be at offset 256 * Regular select input register can never be at offset
255 * 0, and we only print register value for regular case. 257 * 0, and we only print register value for regular case.
256 */ 258 */
257 writel(pin->input_val, ipctl->base + pin->input_reg); 259 if (ipctl->input_sel_base)
260 writel(pin->input_val, ipctl->input_sel_base +
261 pin->input_reg);
262 else
263 writel(pin->input_val, ipctl->base +
264 pin->input_reg);
258 dev_dbg(ipctl->dev, 265 dev_dbg(ipctl->dev,
259 "==>select_input: offset 0x%x val 0x%x\n", 266 "==>select_input: offset 0x%x val 0x%x\n",
260 pin->input_reg, pin->input_val); 267 pin->input_reg, pin->input_val);
@@ -542,6 +549,9 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
542 struct imx_pin_reg *pin_reg; 549 struct imx_pin_reg *pin_reg;
543 struct imx_pin *pin = &grp->pins[i]; 550 struct imx_pin *pin = &grp->pins[i];
544 551
552 if (!(info->flags & ZERO_OFFSET_VALID) && !mux_reg)
553 mux_reg = -1;
554
545 if (info->flags & SHARE_MUX_CONF_REG) { 555 if (info->flags & SHARE_MUX_CONF_REG) {
546 conf_reg = mux_reg; 556 conf_reg = mux_reg;
547 } else { 557 } else {
@@ -550,7 +560,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
550 conf_reg = -1; 560 conf_reg = -1;
551 } 561 }
552 562
553 pin_id = mux_reg ? mux_reg / 4 : conf_reg / 4; 563 pin_id = (mux_reg != -1) ? mux_reg / 4 : conf_reg / 4;
554 pin_reg = &info->pin_regs[pin_id]; 564 pin_reg = &info->pin_regs[pin_id];
555 pin->pin = pin_id; 565 pin->pin = pin_id;
556 grp->pin_ids[i] = pin_id; 566 grp->pin_ids[i] = pin_id;
@@ -580,7 +590,6 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
580 struct device_node *child; 590 struct device_node *child;
581 struct imx_pmx_func *func; 591 struct imx_pmx_func *func;
582 struct imx_pin_group *grp; 592 struct imx_pin_group *grp;
583 static u32 grp_index;
584 u32 i = 0; 593 u32 i = 0;
585 594
586 dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name); 595 dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
@@ -599,7 +608,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
599 608
600 for_each_child_of_node(np, child) { 609 for_each_child_of_node(np, child) {
601 func->groups[i] = child->name; 610 func->groups[i] = child->name;
602 grp = &info->groups[grp_index++]; 611 grp = &info->groups[info->group_index++];
603 imx_pinctrl_parse_groups(child, grp, info, i++); 612 imx_pinctrl_parse_groups(child, grp, info, i++);
604 } 613 }
605 614
@@ -683,6 +692,8 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
683int imx_pinctrl_probe(struct platform_device *pdev, 692int imx_pinctrl_probe(struct platform_device *pdev,
684 struct imx_pinctrl_soc_info *info) 693 struct imx_pinctrl_soc_info *info)
685{ 694{
695 struct device_node *dev_np = pdev->dev.of_node;
696 struct device_node *np;
686 struct imx_pinctrl *ipctl; 697 struct imx_pinctrl *ipctl;
687 struct resource *res; 698 struct resource *res;
688 int ret, i; 699 int ret, i;
@@ -713,6 +724,23 @@ int imx_pinctrl_probe(struct platform_device *pdev,
713 if (IS_ERR(ipctl->base)) 724 if (IS_ERR(ipctl->base))
714 return PTR_ERR(ipctl->base); 725 return PTR_ERR(ipctl->base);
715 726
727 if (of_property_read_bool(dev_np, "fsl,input-sel")) {
728 np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
729 if (np) {
730 ipctl->input_sel_base = of_iomap(np, 0);
731 if (IS_ERR(ipctl->input_sel_base)) {
732 of_node_put(np);
733 dev_err(&pdev->dev,
734 "iomuxc input select base address not found\n");
735 return -ENOMEM;
736 }
737 } else {
738 dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
739 return -EINVAL;
740 }
741 of_node_put(np);
742 }
743
716 imx_pinctrl_desc.name = dev_name(&pdev->dev); 744 imx_pinctrl_desc.name = dev_name(&pdev->dev);
717 imx_pinctrl_desc.pins = info->pins; 745 imx_pinctrl_desc.pins = info->pins;
718 imx_pinctrl_desc.npins = info->npins; 746 imx_pinctrl_desc.npins = info->npins;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 49e55d39f7c8..2a592f657c18 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -78,12 +78,14 @@ struct imx_pinctrl_soc_info {
78 struct imx_pin_reg *pin_regs; 78 struct imx_pin_reg *pin_regs;
79 struct imx_pin_group *groups; 79 struct imx_pin_group *groups;
80 unsigned int ngroups; 80 unsigned int ngroups;
81 unsigned int group_index;
81 struct imx_pmx_func *functions; 82 struct imx_pmx_func *functions;
82 unsigned int nfunctions; 83 unsigned int nfunctions;
83 unsigned int flags; 84 unsigned int flags;
84}; 85};
85 86
86#define SHARE_MUX_CONF_REG 0x1 87#define SHARE_MUX_CONF_REG 0x1
88#define ZERO_OFFSET_VALID 0x2
87 89
88#define NO_MUX 0x0 90#define NO_MUX 0x0
89#define NO_PAD 0x0 91#define NO_PAD 0x0
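For context, a short restatement of why ZERO_OFFSET_VALID exists (a sketch of the parser logic changed above, not new code in the patch): the shared parser historically treated a mux register offset of 0 as "this pin has no mux register", but the i.MX7D LPSR iomuxc really does start its registers at offset 0, so its soc_info sets the flag to keep the value.

/* Restating the pinctrl-imx.c hunk above: */
if (!(info->flags & ZERO_OFFSET_VALID) && !mux_reg)
	mux_reg = -1;	/* offset 0 means "no mux register" on ordinary iomuxc */

pin_id = (mux_reg != -1) ? mux_reg / 4 : conf_reg / 4;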
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 1fa7530530dd..16dc925117de 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -174,6 +174,17 @@ enum imx7d_pads {
174 MX7D_PAD_ENET1_COL = 154, 174 MX7D_PAD_ENET1_COL = 154,
175}; 175};
176 176
177enum imx7d_lpsr_pads {
178 MX7D_PAD_GPIO1_IO00 = 0,
179 MX7D_PAD_GPIO1_IO01 = 1,
180 MX7D_PAD_GPIO1_IO02 = 2,
181 MX7D_PAD_GPIO1_IO03 = 3,
182 MX7D_PAD_GPIO1_IO04 = 4,
183 MX7D_PAD_GPIO1_IO05 = 5,
184 MX7D_PAD_GPIO1_IO06 = 6,
185 MX7D_PAD_GPIO1_IO07 = 7,
186};
187
177/* Pad names for the pinmux subsystem */ 188/* Pad names for the pinmux subsystem */
178static const struct pinctrl_pin_desc imx7d_pinctrl_pads[] = { 189static const struct pinctrl_pin_desc imx7d_pinctrl_pads[] = {
179 IMX_PINCTRL_PIN(MX7D_PAD_RESERVE0), 190 IMX_PINCTRL_PIN(MX7D_PAD_RESERVE0),
@@ -333,13 +344,32 @@ static const struct pinctrl_pin_desc imx7d_pinctrl_pads[] = {
333 IMX_PINCTRL_PIN(MX7D_PAD_ENET1_COL), 344 IMX_PINCTRL_PIN(MX7D_PAD_ENET1_COL),
334}; 345};
335 346
347/* Pad names for the pinmux subsystem */
348static const struct pinctrl_pin_desc imx7d_lpsr_pinctrl_pads[] = {
349 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO00),
350 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO01),
351 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO02),
352 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO03),
353 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO04),
354 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO05),
355 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO06),
356 IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO07),
357};
358
336static struct imx_pinctrl_soc_info imx7d_pinctrl_info = { 359static struct imx_pinctrl_soc_info imx7d_pinctrl_info = {
337 .pins = imx7d_pinctrl_pads, 360 .pins = imx7d_pinctrl_pads,
338 .npins = ARRAY_SIZE(imx7d_pinctrl_pads), 361 .npins = ARRAY_SIZE(imx7d_pinctrl_pads),
339}; 362};
340 363
364static struct imx_pinctrl_soc_info imx7d_lpsr_pinctrl_info = {
365 .pins = imx7d_lpsr_pinctrl_pads,
366 .npins = ARRAY_SIZE(imx7d_lpsr_pinctrl_pads),
367 .flags = ZERO_OFFSET_VALID,
368};
369
341static struct of_device_id imx7d_pinctrl_of_match[] = { 370static struct of_device_id imx7d_pinctrl_of_match[] = {
342 { .compatible = "fsl,imx7d-iomuxc", .data = &imx7d_pinctrl_info, }, 371 { .compatible = "fsl,imx7d-iomuxc", .data = &imx7d_pinctrl_info, },
372 { .compatible = "fsl,imx7d-iomuxc-lpsr", .data = &imx7d_lpsr_pinctrl_info },
343 { /* sentinel */ } 373 { /* sentinel */ }
344}; 374};
345 375
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index f64eecb24755..6bbda6b4ab50 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -474,7 +474,7 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
474 f->name = fn = child->name; 474 f->name = fn = child->name;
475 } 475 }
476 f->ngroups++; 476 f->ngroups++;
477 }; 477 }
478 478
479 /* Get groups for each function */ 479 /* Get groups for each function */
480 idxf = 0; 480 idxf = 0;
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index fe5e07db0a95..4d2efad6553c 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -34,6 +34,14 @@ config PINCTRL_INTEL
34 select GPIOLIB 34 select GPIOLIB
35 select GPIOLIB_IRQCHIP 35 select GPIOLIB_IRQCHIP
36 36
37config PINCTRL_BROXTON
38 tristate "Intel Broxton pinctrl and GPIO driver"
39 depends on ACPI
40 select PINCTRL_INTEL
41 help
42 The Broxton pinctrl driver provides an interface that allows
43 configuring SoC pins and using them as GPIOs.
44
37config PINCTRL_SUNRISEPOINT 45config PINCTRL_SUNRISEPOINT
38 tristate "Intel Sunrisepoint pinctrl and GPIO driver" 46 tristate "Intel Sunrisepoint pinctrl and GPIO driver"
39 depends on ACPI 47 depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index fee756e1255b..03bc68e3546c 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -3,4 +3,5 @@
3obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o 3obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
4obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o 4obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
5obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o 5obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
6obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
6obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o 7obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index f79ea430f651..b59ce75b1947 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -696,6 +696,7 @@ static int byt_gpio_resume(struct device *dev)
696} 696}
697#endif 697#endif
698 698
699#ifdef CONFIG_PM
699static int byt_gpio_runtime_suspend(struct device *dev) 700static int byt_gpio_runtime_suspend(struct device *dev)
700{ 701{
701 return 0; 702 return 0;
@@ -705,6 +706,7 @@ static int byt_gpio_runtime_resume(struct device *dev)
705{ 706{
706 return 0; 707 return 0;
707} 708}
709#endif
708 710
709static const struct dev_pm_ops byt_gpio_pm_ops = { 711static const struct dev_pm_ops byt_gpio_pm_ops = {
710 SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume) 712 SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
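The #ifdef CONFIG_PM guard added above avoids "defined but not used" warnings for the runtime PM stubs when CONFIG_PM is disabled. An alternative pattern, shown here only as a sketch and not what this patch does, is to drop the preprocessor guard and mark the callbacks __maybe_unused.

/* Alternative (not taken by this patch): let the compiler discard the
 * unused stubs instead of guarding them with #ifdef CONFIG_PM. */
static int __maybe_unused byt_gpio_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused byt_gpio_runtime_resume(struct device *dev)
{
	return 0;
}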
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
new file mode 100644
index 000000000000..e42d5d4183f5
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -0,0 +1,1065 @@
1/*
2 * Intel Broxton SoC pinctrl/GPIO driver
3 *
4 * Copyright (C) 2015, Intel Corporation
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/acpi.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/pm.h>
16#include <linux/pinctrl/pinctrl.h>
17
18#include "pinctrl-intel.h"
19
20#define BXT_PAD_OWN 0x020
21#define BXT_HOSTSW_OWN 0x080
22#define BXT_PADCFGLOCK 0x090
23#define BXT_GPI_IE 0x110
24
25#define BXT_COMMUNITY(s, e) \
26 { \
27 .padown_offset = BXT_PAD_OWN, \
28 .padcfglock_offset = BXT_PADCFGLOCK, \
29 .hostown_offset = BXT_HOSTSW_OWN, \
30 .ie_offset = BXT_GPI_IE, \
31 .pin_base = (s), \
32 .npins = ((e) - (s) + 1), \
33 }
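/*
 * Each Broxton/Apollo Lake GPIO community shares this register layout:
 * BXT_COMMUNITY() fills in the fixed pad-ownership, pad-config-lock,
 * host-ownership and interrupt-enable offsets and records the inclusive
 * pin range [s, e] covered by the community.
 */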
34
35/* BXT */
36static const struct pinctrl_pin_desc bxt_north_pins[] = {
37 PINCTRL_PIN(0, "GPIO_0"),
38 PINCTRL_PIN(1, "GPIO_1"),
39 PINCTRL_PIN(2, "GPIO_2"),
40 PINCTRL_PIN(3, "GPIO_3"),
41 PINCTRL_PIN(4, "GPIO_4"),
42 PINCTRL_PIN(5, "GPIO_5"),
43 PINCTRL_PIN(6, "GPIO_6"),
44 PINCTRL_PIN(7, "GPIO_7"),
45 PINCTRL_PIN(8, "GPIO_8"),
46 PINCTRL_PIN(9, "GPIO_9"),
47 PINCTRL_PIN(10, "GPIO_10"),
48 PINCTRL_PIN(11, "GPIO_11"),
49 PINCTRL_PIN(12, "GPIO_12"),
50 PINCTRL_PIN(13, "GPIO_13"),
51 PINCTRL_PIN(14, "GPIO_14"),
52 PINCTRL_PIN(15, "GPIO_15"),
53 PINCTRL_PIN(16, "GPIO_16"),
54 PINCTRL_PIN(17, "GPIO_17"),
55 PINCTRL_PIN(18, "GPIO_18"),
56 PINCTRL_PIN(19, "GPIO_19"),
57 PINCTRL_PIN(20, "GPIO_20"),
58 PINCTRL_PIN(21, "GPIO_21"),
59 PINCTRL_PIN(22, "GPIO_22"),
60 PINCTRL_PIN(23, "GPIO_23"),
61 PINCTRL_PIN(24, "GPIO_24"),
62 PINCTRL_PIN(25, "GPIO_25"),
63 PINCTRL_PIN(26, "GPIO_26"),
64 PINCTRL_PIN(27, "GPIO_27"),
65 PINCTRL_PIN(28, "GPIO_28"),
66 PINCTRL_PIN(29, "GPIO_29"),
67 PINCTRL_PIN(30, "GPIO_30"),
68 PINCTRL_PIN(31, "GPIO_31"),
69 PINCTRL_PIN(32, "GPIO_32"),
70 PINCTRL_PIN(33, "GPIO_33"),
71 PINCTRL_PIN(34, "PWM0"),
72 PINCTRL_PIN(35, "PWM1"),
73 PINCTRL_PIN(36, "PWM2"),
74 PINCTRL_PIN(37, "PWM3"),
75 PINCTRL_PIN(38, "LPSS_UART0_RXD"),
76 PINCTRL_PIN(39, "LPSS_UART0_TXD"),
77 PINCTRL_PIN(40, "LPSS_UART0_RTS_B"),
78 PINCTRL_PIN(41, "LPSS_UART0_CTS_B"),
79 PINCTRL_PIN(42, "LPSS_UART1_RXD"),
80 PINCTRL_PIN(43, "LPSS_UART1_TXD"),
81 PINCTRL_PIN(44, "LPSS_UART1_RTS_B"),
82 PINCTRL_PIN(45, "LPSS_UART1_CTS_B"),
83 PINCTRL_PIN(46, "LPSS_UART2_RXD"),
84 PINCTRL_PIN(47, "LPSS_UART2_TXD"),
85 PINCTRL_PIN(48, "LPSS_UART2_RTS_B"),
86 PINCTRL_PIN(49, "LPSS_UART2_CTS_B"),
87 PINCTRL_PIN(50, "ISH_UART0_RXD"),
88 PINCTRL_PIN(51, "ISH_UART0_TXT"),
89 PINCTRL_PIN(52, "ISH_UART0_RTS_B"),
90 PINCTRL_PIN(53, "ISH_UART0_CTS_B"),
91 PINCTRL_PIN(54, "ISH_UART1_RXD"),
92 PINCTRL_PIN(55, "ISH_UART1_TXT"),
93 PINCTRL_PIN(56, "ISH_UART1_RTS_B"),
94 PINCTRL_PIN(57, "ISH_UART1_CTS_B"),
95 PINCTRL_PIN(58, "ISH_UART2_RXD"),
96 PINCTRL_PIN(59, "ISH_UART2_TXD"),
97 PINCTRL_PIN(60, "ISH_UART2_RTS_B"),
98 PINCTRL_PIN(61, "ISH_UART2_CTS_B"),
99 PINCTRL_PIN(62, "GP_CAMERASB00"),
100 PINCTRL_PIN(63, "GP_CAMERASB01"),
101 PINCTRL_PIN(64, "GP_CAMERASB02"),
102 PINCTRL_PIN(65, "GP_CAMERASB03"),
103 PINCTRL_PIN(66, "GP_CAMERASB04"),
104 PINCTRL_PIN(67, "GP_CAMERASB05"),
105 PINCTRL_PIN(68, "GP_CAMERASB06"),
106 PINCTRL_PIN(69, "GP_CAMERASB07"),
107 PINCTRL_PIN(70, "GP_CAMERASB08"),
108 PINCTRL_PIN(71, "GP_CAMERASB09"),
109 PINCTRL_PIN(72, "GP_CAMERASB10"),
110 PINCTRL_PIN(73, "GP_CAMERASB11"),
111 PINCTRL_PIN(74, "TCK"),
112 PINCTRL_PIN(75, "TRST_B"),
113 PINCTRL_PIN(76, "TMS"),
114 PINCTRL_PIN(77, "TDI"),
115 PINCTRL_PIN(78, "CX_PMODE"),
116 PINCTRL_PIN(79, "CX_PREQ_B"),
117 PINCTRL_PIN(80, "JTAGX"),
118 PINCTRL_PIN(81, "CX_PRDY_B"),
119 PINCTRL_PIN(82, "TDO"),
120};
121
122static const unsigned bxt_north_pwm0_pins[] = { 34 };
123static const unsigned bxt_north_pwm1_pins[] = { 35 };
124static const unsigned bxt_north_pwm2_pins[] = { 36 };
125static const unsigned bxt_north_pwm3_pins[] = { 37 };
126static const unsigned bxt_north_uart0_pins[] = { 38, 39, 40, 41 };
127static const unsigned bxt_north_uart1_pins[] = { 42, 43, 44, 45 };
128static const unsigned bxt_north_uart2_pins[] = { 46, 47, 48, 49 };
129static const unsigned bxt_north_uart0b_pins[] = { 50, 51, 52, 53 };
130static const unsigned bxt_north_uart1b_pins[] = { 54, 55, 56, 57 };
131static const unsigned bxt_north_uart2b_pins[] = { 58, 59, 60, 61 };
132static const unsigned bxt_north_uart3_pins[] = { 58, 59, 60, 61 };
133
134static const struct intel_pingroup bxt_north_groups[] = {
135 PIN_GROUP("pwm0_grp", bxt_north_pwm0_pins, 1),
136 PIN_GROUP("pwm1_grp", bxt_north_pwm1_pins, 1),
137 PIN_GROUP("pwm2_grp", bxt_north_pwm2_pins, 1),
138 PIN_GROUP("pwm3_grp", bxt_north_pwm3_pins, 1),
139 PIN_GROUP("uart0_grp", bxt_north_uart0_pins, 1),
140 PIN_GROUP("uart1_grp", bxt_north_uart1_pins, 1),
141 PIN_GROUP("uart2_grp", bxt_north_uart2_pins, 1),
142 PIN_GROUP("uart0b_grp", bxt_north_uart0b_pins, 2),
143 PIN_GROUP("uart1b_grp", bxt_north_uart1b_pins, 2),
144 PIN_GROUP("uart2b_grp", bxt_north_uart2b_pins, 2),
145 PIN_GROUP("uart3_grp", bxt_north_uart3_pins, 3),
146};
147
148static const char * const bxt_north_pwm0_groups[] = { "pwm0_grp" };
149static const char * const bxt_north_pwm1_groups[] = { "pwm1_grp" };
150static const char * const bxt_north_pwm2_groups[] = { "pwm2_grp" };
151static const char * const bxt_north_pwm3_groups[] = { "pwm3_grp" };
152static const char * const bxt_north_uart0_groups[] = {
153 "uart0_grp", "uart0b_grp",
154};
155static const char * const bxt_north_uart1_groups[] = {
156 "uart1_grp", "uart1b_grp",
157};
158static const char * const bxt_north_uart2_groups[] = {
159 "uart2_grp", "uart2b_grp",
160};
161static const char * const bxt_north_uart3_groups[] = { "uart3_grp" };
162
163static const struct intel_function bxt_north_functions[] = {
164 FUNCTION("pwm0", bxt_north_pwm0_groups),
165 FUNCTION("pwm1", bxt_north_pwm1_groups),
166 FUNCTION("pwm2", bxt_north_pwm2_groups),
167 FUNCTION("pwm3", bxt_north_pwm3_groups),
168 FUNCTION("uart0", bxt_north_uart0_groups),
169 FUNCTION("uart1", bxt_north_uart1_groups),
170 FUNCTION("uart2", bxt_north_uart2_groups),
171 FUNCTION("uart3", bxt_north_uart3_groups),
172};
173
174static const struct intel_community bxt_north_communities[] = {
175 BXT_COMMUNITY(0, 82),
176};
177
178static const struct intel_pinctrl_soc_data bxt_north_soc_data = {
179 .uid = "1",
180 .pins = bxt_north_pins,
181 .npins = ARRAY_SIZE(bxt_north_pins),
182 .groups = bxt_north_groups,
183 .ngroups = ARRAY_SIZE(bxt_north_groups),
184 .functions = bxt_north_functions,
185 .nfunctions = ARRAY_SIZE(bxt_north_functions),
186 .communities = bxt_north_communities,
187 .ncommunities = ARRAY_SIZE(bxt_north_communities),
188};
189
190static const struct pinctrl_pin_desc bxt_northwest_pins[] = {
191 PINCTRL_PIN(0, "PMC_SPI_FS0"),
192 PINCTRL_PIN(1, "PMC_SPI_FS1"),
193 PINCTRL_PIN(2, "PMC_SPI_FS2"),
194 PINCTRL_PIN(3, "PMC_SPI_RXD"),
195 PINCTRL_PIN(4, "PMC_SPI_TXD"),
196 PINCTRL_PIN(5, "PMC_SPI_CLK"),
197 PINCTRL_PIN(6, "PMC_UART_RXD"),
198 PINCTRL_PIN(7, "PMC_UART_TXD"),
199 PINCTRL_PIN(8, "PMIC_PWRGOOD"),
200 PINCTRL_PIN(9, "PMIC_RESET_B"),
201 PINCTRL_PIN(10, "RTC_CLK"),
202 PINCTRL_PIN(11, "PMIC_SDWN_B"),
203 PINCTRL_PIN(12, "PMIC_BCUDISW2"),
204 PINCTRL_PIN(13, "PMIC_BCUDISCRIT"),
205 PINCTRL_PIN(14, "PMIC_THERMTRIP_B"),
206 PINCTRL_PIN(15, "PMIC_STDBY"),
207 PINCTRL_PIN(16, "SVID0_ALERT_B"),
208 PINCTRL_PIN(17, "SVID0_DATA"),
209 PINCTRL_PIN(18, "SVID0_CLK"),
210 PINCTRL_PIN(19, "PMIC_I2C_SCL"),
211 PINCTRL_PIN(20, "PMIC_I2C_SDA"),
212 PINCTRL_PIN(21, "AVS_I2S1_MCLK"),
213 PINCTRL_PIN(22, "AVS_I2S1_BCLK"),
214 PINCTRL_PIN(23, "AVS_I2S1_WS_SYNC"),
215 PINCTRL_PIN(24, "AVS_I2S1_SDI"),
216 PINCTRL_PIN(25, "AVS_I2S1_SDO"),
217 PINCTRL_PIN(26, "AVS_M_CLK_A1"),
218 PINCTRL_PIN(27, "AVS_M_CLK_B1"),
219 PINCTRL_PIN(28, "AVS_M_DATA_1"),
220 PINCTRL_PIN(29, "AVS_M_CLK_AB2"),
221 PINCTRL_PIN(30, "AVS_M_DATA_2"),
222 PINCTRL_PIN(31, "AVS_I2S2_MCLK"),
223 PINCTRL_PIN(32, "AVS_I2S2_BCLK"),
224 PINCTRL_PIN(33, "AVS_I2S2_WS_SYNC"),
225 PINCTRL_PIN(34, "AVS_I2S2_SDI"),
226 PINCTRL_PIN(35, "AVS_I2S2_SDOK"),
227 PINCTRL_PIN(36, "AVS_I2S3_BCLK"),
228 PINCTRL_PIN(37, "AVS_I2S3_WS_SYNC"),
229 PINCTRL_PIN(38, "AVS_I2S3_SDI"),
230 PINCTRL_PIN(39, "AVS_I2S3_SDO"),
231 PINCTRL_PIN(40, "AVS_I2S4_BCLK"),
232 PINCTRL_PIN(41, "AVS_I2S4_WS_SYNC"),
233 PINCTRL_PIN(42, "AVS_I2S4_SDI"),
234 PINCTRL_PIN(43, "AVS_I2S4_SDO"),
235 PINCTRL_PIN(44, "PROCHOT_B"),
236 PINCTRL_PIN(45, "FST_SPI_CS0_B"),
237 PINCTRL_PIN(46, "FST_SPI_CS1_B"),
238 PINCTRL_PIN(47, "FST_SPI_MOSI_IO0"),
239 PINCTRL_PIN(48, "FST_SPI_MISO_IO1"),
240 PINCTRL_PIN(49, "FST_SPI_IO2"),
241 PINCTRL_PIN(50, "FST_SPI_IO3"),
242 PINCTRL_PIN(51, "FST_SPI_CLK"),
243 PINCTRL_PIN(52, "FST_SPI_CLK_FB"),
244 PINCTRL_PIN(53, "GP_SSP_0_CLK"),
245 PINCTRL_PIN(54, "GP_SSP_0_FS0"),
246 PINCTRL_PIN(55, "GP_SSP_0_FS1"),
247 PINCTRL_PIN(56, "GP_SSP_0_FS2"),
248 PINCTRL_PIN(57, "GP_SSP_0_RXD"),
249 PINCTRL_PIN(58, "GP_SSP_0_TXD"),
250 PINCTRL_PIN(59, "GP_SSP_1_CLK"),
251 PINCTRL_PIN(60, "GP_SSP_1_FS0"),
252 PINCTRL_PIN(61, "GP_SSP_1_FS1"),
253 PINCTRL_PIN(62, "GP_SSP_1_FS2"),
254 PINCTRL_PIN(63, "GP_SSP_1_FS3"),
255 PINCTRL_PIN(64, "GP_SSP_1_RXD"),
256 PINCTRL_PIN(65, "GP_SSP_1_TXD"),
257 PINCTRL_PIN(66, "GP_SSP_2_CLK"),
258 PINCTRL_PIN(67, "GP_SSP_2_FS0"),
259 PINCTRL_PIN(68, "GP_SSP_2_FS1"),
260 PINCTRL_PIN(69, "GP_SSP_2_FS2"),
261 PINCTRL_PIN(70, "GP_SSP_2_RXD"),
262 PINCTRL_PIN(71, "GP_SSP_2_TXD"),
263};
264
265static const unsigned bxt_northwest_ssp0_pins[] = { 53, 54, 55, 56, 57, 58 };
266static const unsigned bxt_northwest_ssp1_pins[] = {
267 59, 60, 61, 62, 63, 64, 65
268};
269static const unsigned bxt_northwest_ssp2_pins[] = { 66, 67, 68, 69, 70, 71 };
270static const unsigned bxt_northwest_uart3_pins[] = { 67, 68, 69, 70 };
271
272static const struct intel_pingroup bxt_northwest_groups[] = {
273 PIN_GROUP("ssp0_grp", bxt_northwest_ssp0_pins, 1),
274 PIN_GROUP("ssp1_grp", bxt_northwest_ssp1_pins, 1),
275 PIN_GROUP("ssp2_grp", bxt_northwest_ssp2_pins, 1),
276 PIN_GROUP("uart3_grp", bxt_northwest_uart3_pins, 2),
277};
278
279static const char * const bxt_northwest_ssp0_groups[] = { "ssp0_grp" };
280static const char * const bxt_northwest_ssp1_groups[] = { "ssp1_grp" };
281static const char * const bxt_northwest_ssp2_groups[] = { "ssp2_grp" };
282static const char * const bxt_northwest_uart3_groups[] = { "uart3_grp" };
283
284static const struct intel_function bxt_northwest_functions[] = {
285 FUNCTION("ssp0", bxt_northwest_ssp0_groups),
286 FUNCTION("ssp1", bxt_northwest_ssp1_groups),
287 FUNCTION("ssp2", bxt_northwest_ssp2_groups),
288 FUNCTION("uart3", bxt_northwest_uart3_groups),
289};
290
291static const struct intel_community bxt_northwest_communities[] = {
292 BXT_COMMUNITY(0, 71),
293};
294
295static const struct intel_pinctrl_soc_data bxt_northwest_soc_data = {
296 .uid = "2",
297 .pins = bxt_northwest_pins,
298 .npins = ARRAY_SIZE(bxt_northwest_pins),
299 .groups = bxt_northwest_groups,
300 .ngroups = ARRAY_SIZE(bxt_northwest_groups),
301 .functions = bxt_northwest_functions,
302 .nfunctions = ARRAY_SIZE(bxt_northwest_functions),
303 .communities = bxt_northwest_communities,
304 .ncommunities = ARRAY_SIZE(bxt_northwest_communities),
305};
306
307static const struct pinctrl_pin_desc bxt_west_pins[] = {
308 PINCTRL_PIN(0, "LPSS_I2C0_SDA"),
309 PINCTRL_PIN(1, "LPSS_I2C0_SCL"),
310 PINCTRL_PIN(2, "LPSS_I2C1_SDA"),
311 PINCTRL_PIN(3, "LPSS_I2C1_SCL"),
312 PINCTRL_PIN(4, "LPSS_I2C2_SDA"),
313 PINCTRL_PIN(5, "LPSS_I2C2_SCL"),
314 PINCTRL_PIN(6, "LPSS_I2C3_SDA"),
315 PINCTRL_PIN(7, "LPSS_I2C3_SCL"),
316 PINCTRL_PIN(8, "LPSS_I2C4_SDA"),
317 PINCTRL_PIN(9, "LPSS_I2C4_SCL"),
318 PINCTRL_PIN(10, "LPSS_I2C5_SDA"),
319 PINCTRL_PIN(11, "LPSS_I2C5_SCL"),
320 PINCTRL_PIN(12, "LPSS_I2C6_SDA"),
321 PINCTRL_PIN(13, "LPSS_I2C6_SCL"),
322 PINCTRL_PIN(14, "LPSS_I2C7_SDA"),
323 PINCTRL_PIN(15, "LPSS_I2C7_SCL"),
324 PINCTRL_PIN(16, "ISH_I2C0_SDA"),
325 PINCTRL_PIN(17, "ISH_I2C0_SCL"),
326 PINCTRL_PIN(18, "ISH_I2C1_SDA"),
327 PINCTRL_PIN(19, "ISH_I2C1_SCL"),
328 PINCTRL_PIN(20, "ISH_I2C2_SDA"),
329 PINCTRL_PIN(21, "ISH_I2C2_SCL"),
330 PINCTRL_PIN(22, "ISH_GPIO_0"),
331 PINCTRL_PIN(23, "ISH_GPIO_1"),
332 PINCTRL_PIN(24, "ISH_GPIO_2"),
333 PINCTRL_PIN(25, "ISH_GPIO_3"),
334 PINCTRL_PIN(26, "ISH_GPIO_4"),
335 PINCTRL_PIN(27, "ISH_GPIO_5"),
336 PINCTRL_PIN(28, "ISH_GPIO_6"),
337 PINCTRL_PIN(29, "ISH_GPIO_7"),
338 PINCTRL_PIN(30, "ISH_GPIO_8"),
339 PINCTRL_PIN(31, "ISH_GPIO_9"),
340 PINCTRL_PIN(32, "MODEM_CLKREQ"),
341 PINCTRL_PIN(33, "DGCLKDBG_PMC_0"),
342 PINCTRL_PIN(34, "DGCLKDBG_PMC_1"),
343 PINCTRL_PIN(35, "DGCLKDBG_PMC_2"),
344 PINCTRL_PIN(36, "DGCLKDBG_ICLK_0"),
345 PINCTRL_PIN(37, "DGCLKDBG_ICLK_1"),
346 PINCTRL_PIN(38, "OSC_CLK_OUT_0"),
347 PINCTRL_PIN(39, "OSC_CLK_OUT_1"),
348 PINCTRL_PIN(40, "OSC_CLK_OUT_2"),
349 PINCTRL_PIN(41, "OSC_CLK_OUT_3"),
350};
351
352static const unsigned bxt_west_i2c0_pins[] = { 0, 1 };
353static const unsigned bxt_west_i2c1_pins[] = { 2, 3 };
354static const unsigned bxt_west_i2c2_pins[] = { 4, 5 };
355static const unsigned bxt_west_i2c3_pins[] = { 6, 7 };
356static const unsigned bxt_west_i2c4_pins[] = { 8, 9 };
357static const unsigned bxt_west_i2c5_pins[] = { 10, 11 };
358static const unsigned bxt_west_i2c6_pins[] = { 12, 13 };
359static const unsigned bxt_west_i2c7_pins[] = { 14, 15 };
360static const unsigned bxt_west_i2c5b_pins[] = { 16, 17 };
361static const unsigned bxt_west_i2c6b_pins[] = { 18, 19 };
362static const unsigned bxt_west_i2c7b_pins[] = { 20, 21 };
363
364static const struct intel_pingroup bxt_west_groups[] = {
365 PIN_GROUP("i2c0_grp", bxt_west_i2c0_pins, 1),
366 PIN_GROUP("i2c1_grp", bxt_west_i2c1_pins, 1),
367 PIN_GROUP("i2c2_grp", bxt_west_i2c2_pins, 1),
368 PIN_GROUP("i2c3_grp", bxt_west_i2c3_pins, 1),
369 PIN_GROUP("i2c4_grp", bxt_west_i2c4_pins, 1),
370 PIN_GROUP("i2c5_grp", bxt_west_i2c5_pins, 1),
371 PIN_GROUP("i2c6_grp", bxt_west_i2c6_pins, 1),
372 PIN_GROUP("i2c7_grp", bxt_west_i2c7_pins, 1),
373 PIN_GROUP("i2c5b_grp", bxt_west_i2c5b_pins, 2),
374 PIN_GROUP("i2c6b_grp", bxt_west_i2c6b_pins, 2),
375 PIN_GROUP("i2c7b_grp", bxt_west_i2c7b_pins, 2),
376};
377
378static const char * const bxt_west_i2c0_groups[] = { "i2c0_grp" };
379static const char * const bxt_west_i2c1_groups[] = { "i2c1_grp" };
380static const char * const bxt_west_i2c2_groups[] = { "i2c2_grp" };
381static const char * const bxt_west_i2c3_groups[] = { "i2c3_grp" };
382static const char * const bxt_west_i2c4_groups[] = { "i2c4_grp" };
383static const char * const bxt_west_i2c5_groups[] = { "i2c5_grp", "i2c5b_grp" };
384static const char * const bxt_west_i2c6_groups[] = { "i2c6_grp", "i2c6b_grp" };
385static const char * const bxt_west_i2c7_groups[] = { "i2c7_grp", "i2c7b_grp" };
386
387static const struct intel_function bxt_west_functions[] = {
388 FUNCTION("i2c0", bxt_west_i2c0_groups),
389 FUNCTION("i2c1", bxt_west_i2c1_groups),
390 FUNCTION("i2c2", bxt_west_i2c2_groups),
391 FUNCTION("i2c3", bxt_west_i2c3_groups),
392 FUNCTION("i2c4", bxt_west_i2c4_groups),
393 FUNCTION("i2c5", bxt_west_i2c5_groups),
394 FUNCTION("i2c6", bxt_west_i2c6_groups),
395 FUNCTION("i2c7", bxt_west_i2c7_groups),
396};
397
398static const struct intel_community bxt_west_communities[] = {
399 BXT_COMMUNITY(0, 41),
400};
401
402static const struct intel_pinctrl_soc_data bxt_west_soc_data = {
403 .uid = "3",
404 .pins = bxt_west_pins,
405 .npins = ARRAY_SIZE(bxt_west_pins),
406 .groups = bxt_west_groups,
407 .ngroups = ARRAY_SIZE(bxt_west_groups),
408 .functions = bxt_west_functions,
409 .nfunctions = ARRAY_SIZE(bxt_west_functions),
410 .communities = bxt_west_communities,
411 .ncommunities = ARRAY_SIZE(bxt_west_communities),
412};
413
414static const struct pinctrl_pin_desc bxt_southwest_pins[] = {
415 PINCTRL_PIN(0, "EMMC0_CLK"),
416 PINCTRL_PIN(1, "EMMC0_D0"),
417 PINCTRL_PIN(2, "EMMC0_D1"),
418 PINCTRL_PIN(3, "EMMC0_D2"),
419 PINCTRL_PIN(4, "EMMC0_D3"),
420 PINCTRL_PIN(5, "EMMC0_D4"),
421 PINCTRL_PIN(6, "EMMC0_D5"),
422 PINCTRL_PIN(7, "EMMC0_D6"),
423 PINCTRL_PIN(8, "EMMC0_D7"),
424 PINCTRL_PIN(9, "EMMC0_CMD"),
425 PINCTRL_PIN(10, "SDIO_CLK"),
426 PINCTRL_PIN(11, "SDIO_D0"),
427 PINCTRL_PIN(12, "SDIO_D1"),
428 PINCTRL_PIN(13, "SDIO_D2"),
429 PINCTRL_PIN(14, "SDIO_D3"),
430 PINCTRL_PIN(15, "SDIO_CMD"),
431 PINCTRL_PIN(16, "SDCARD_CLK"),
432 PINCTRL_PIN(17, "SDCARD_D0"),
433 PINCTRL_PIN(18, "SDCARD_D1"),
434 PINCTRL_PIN(19, "SDCARD_D2"),
435 PINCTRL_PIN(20, "SDCARD_D3"),
436 PINCTRL_PIN(21, "SDCARD_CD_B"),
437 PINCTRL_PIN(22, "SDCARD_CMD"),
438 PINCTRL_PIN(23, "SDCARD_LVL_CLK_FB"),
439 PINCTRL_PIN(24, "SDCARD_LVL_CMD_DIR"),
440 PINCTRL_PIN(25, "SDCARD_LVL_DAT_DIR"),
441 PINCTRL_PIN(26, "EMMC0_STROBE"),
442 PINCTRL_PIN(27, "SDIO_PWR_DOWN_B"),
443 PINCTRL_PIN(28, "SDCARD_PWR_DOWN_B"),
444 PINCTRL_PIN(29, "SDCARD_LVL_SEL"),
445 PINCTRL_PIN(30, "SDCARD_LVL_WP"),
446};
447
448static const unsigned bxt_southwest_emmc0_pins[] = {
449 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 26,
450};
451static const unsigned bxt_southwest_sdio_pins[] = {
452 10, 11, 12, 13, 14, 15, 27,
453};
454static const unsigned bxt_southwest_sdcard_pins[] = {
455 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
456};
457
458static const struct intel_pingroup bxt_southwest_groups[] = {
459 PIN_GROUP("emmc0_grp", bxt_southwest_emmc0_pins, 1),
460 PIN_GROUP("sdio_grp", bxt_southwest_sdio_pins, 1),
461 PIN_GROUP("sdcard_grp", bxt_southwest_sdcard_pins, 1),
462};
463
464static const char * const bxt_southwest_emmc0_groups[] = { "emmc0_grp" };
465static const char * const bxt_southwest_sdio_groups[] = { "sdio_grp" };
466static const char * const bxt_southwest_sdcard_groups[] = { "sdcard_grp" };
467
468static const struct intel_function bxt_southwest_functions[] = {
469 FUNCTION("emmc0", bxt_southwest_emmc0_groups),
470 FUNCTION("sdio", bxt_southwest_sdio_groups),
471 FUNCTION("sdcard", bxt_southwest_sdcard_groups),
472};
473
474static const struct intel_community bxt_southwest_communities[] = {
475 BXT_COMMUNITY(0, 30),
476};
477
478static const struct intel_pinctrl_soc_data bxt_southwest_soc_data = {
479 .uid = "4",
480 .pins = bxt_southwest_pins,
481 .npins = ARRAY_SIZE(bxt_southwest_pins),
482 .groups = bxt_southwest_groups,
483 .ngroups = ARRAY_SIZE(bxt_southwest_groups),
484 .functions = bxt_southwest_functions,
485 .nfunctions = ARRAY_SIZE(bxt_southwest_functions),
486 .communities = bxt_southwest_communities,
487 .ncommunities = ARRAY_SIZE(bxt_southwest_communities),
488};
489
490static const struct pinctrl_pin_desc bxt_south_pins[] = {
491 PINCTRL_PIN(0, "HV_DDI0_DDC_SDA"),
492 PINCTRL_PIN(1, "HV_DDI0_DDC_SCL"),
493 PINCTRL_PIN(2, "HV_DDI1_DDC_SDA"),
494 PINCTRL_PIN(3, "HV_DDI1_DDC_SCL"),
495 PINCTRL_PIN(4, "DBI_SDA"),
496 PINCTRL_PIN(5, "DBI_SCL"),
497 PINCTRL_PIN(6, "PANEL0_VDDEN"),
498 PINCTRL_PIN(7, "PANEL0_BKLTEN"),
499 PINCTRL_PIN(8, "PANEL0_BKLTCTL"),
500 PINCTRL_PIN(9, "PANEL1_VDDEN"),
501 PINCTRL_PIN(10, "PANEL1_BKLTEN"),
502 PINCTRL_PIN(11, "PANEL1_BKLTCTL"),
503 PINCTRL_PIN(12, "DBI_CSX"),
504 PINCTRL_PIN(13, "DBI_RESX"),
505 PINCTRL_PIN(14, "GP_INTD_DSI_TE1"),
506 PINCTRL_PIN(15, "GP_INTD_DSI_TE2"),
507 PINCTRL_PIN(16, "USB_OC0_B"),
508 PINCTRL_PIN(17, "USB_OC1_B"),
509 PINCTRL_PIN(18, "MEX_WAKE0_B"),
510 PINCTRL_PIN(19, "MEX_WAKE1_B"),
511};
512
513static const struct intel_community bxt_south_communities[] = {
514 BXT_COMMUNITY(0, 19),
515};
516
517static const struct intel_pinctrl_soc_data bxt_south_soc_data = {
518 .uid = "5",
519 .pins = bxt_south_pins,
520 .npins = ARRAY_SIZE(bxt_south_pins),
521 .communities = bxt_south_communities,
522 .ncommunities = ARRAY_SIZE(bxt_south_communities),
523};
524
525static const struct intel_pinctrl_soc_data *bxt_pinctrl_soc_data[] = {
526 &bxt_north_soc_data,
527 &bxt_northwest_soc_data,
528 &bxt_west_soc_data,
529 &bxt_southwest_soc_data,
530 &bxt_south_soc_data,
531 NULL,
532};
533
534/* APL */
535static const struct pinctrl_pin_desc apl_north_pins[] = {
536 PINCTRL_PIN(0, "GPIO_0"),
537 PINCTRL_PIN(1, "GPIO_1"),
538 PINCTRL_PIN(2, "GPIO_2"),
539 PINCTRL_PIN(3, "GPIO_3"),
540 PINCTRL_PIN(4, "GPIO_4"),
541 PINCTRL_PIN(5, "GPIO_5"),
542 PINCTRL_PIN(6, "GPIO_6"),
543 PINCTRL_PIN(7, "GPIO_7"),
544 PINCTRL_PIN(8, "GPIO_8"),
545 PINCTRL_PIN(9, "GPIO_9"),
546 PINCTRL_PIN(10, "GPIO_10"),
547 PINCTRL_PIN(11, "GPIO_11"),
548 PINCTRL_PIN(12, "GPIO_12"),
549 PINCTRL_PIN(13, "GPIO_13"),
550 PINCTRL_PIN(14, "GPIO_14"),
551 PINCTRL_PIN(15, "GPIO_15"),
552 PINCTRL_PIN(16, "GPIO_16"),
553 PINCTRL_PIN(17, "GPIO_17"),
554 PINCTRL_PIN(18, "GPIO_18"),
555 PINCTRL_PIN(19, "GPIO_19"),
556 PINCTRL_PIN(20, "GPIO_20"),
557 PINCTRL_PIN(21, "GPIO_21"),
558 PINCTRL_PIN(22, "GPIO_22"),
559 PINCTRL_PIN(23, "GPIO_23"),
560 PINCTRL_PIN(24, "GPIO_24"),
561 PINCTRL_PIN(25, "GPIO_25"),
562 PINCTRL_PIN(26, "GPIO_26"),
563 PINCTRL_PIN(27, "GPIO_27"),
564 PINCTRL_PIN(28, "GPIO_28"),
565 PINCTRL_PIN(29, "GPIO_29"),
566 PINCTRL_PIN(30, "GPIO_30"),
567 PINCTRL_PIN(31, "GPIO_31"),
568 PINCTRL_PIN(32, "GPIO_32"),
569 PINCTRL_PIN(33, "GPIO_33"),
570 PINCTRL_PIN(34, "PWM0"),
571 PINCTRL_PIN(35, "PWM1"),
572 PINCTRL_PIN(36, "PWM2"),
573 PINCTRL_PIN(37, "PWM3"),
574 PINCTRL_PIN(38, "LPSS_UART0_RXD"),
575 PINCTRL_PIN(39, "LPSS_UART0_TXD"),
576 PINCTRL_PIN(40, "LPSS_UART0_RTS_B"),
577 PINCTRL_PIN(41, "LPSS_UART0_CTS_B"),
578 PINCTRL_PIN(42, "LPSS_UART1_RXD"),
579 PINCTRL_PIN(43, "LPSS_UART1_TXD"),
580 PINCTRL_PIN(44, "LPSS_UART1_RTS_B"),
581 PINCTRL_PIN(45, "LPSS_UART1_CTS_B"),
582 PINCTRL_PIN(46, "LPSS_UART2_RXD"),
583 PINCTRL_PIN(47, "LPSS_UART2_TXD"),
584 PINCTRL_PIN(48, "LPSS_UART2_RTS_B"),
585 PINCTRL_PIN(49, "LPSS_UART2_CTS_B"),
586 PINCTRL_PIN(50, "GP_CAMERASB00"),
587 PINCTRL_PIN(51, "GP_CAMERASB01"),
588 PINCTRL_PIN(52, "GP_CAMERASB02"),
589 PINCTRL_PIN(53, "GP_CAMERASB03"),
590 PINCTRL_PIN(54, "GP_CAMERASB04"),
591 PINCTRL_PIN(55, "GP_CAMERASB05"),
592 PINCTRL_PIN(56, "GP_CAMERASB06"),
593 PINCTRL_PIN(57, "GP_CAMERASB07"),
594 PINCTRL_PIN(58, "GP_CAMERASB08"),
595 PINCTRL_PIN(59, "GP_CAMERASB09"),
596 PINCTRL_PIN(60, "GP_CAMERASB10"),
597 PINCTRL_PIN(61, "GP_CAMERASB11"),
598 PINCTRL_PIN(62, "TCK"),
599 PINCTRL_PIN(63, "TRST_B"),
600 PINCTRL_PIN(64, "TMS"),
601 PINCTRL_PIN(65, "TDI"),
602 PINCTRL_PIN(66, "CX_PMODE"),
603 PINCTRL_PIN(67, "CX_PREQ_B"),
604 PINCTRL_PIN(68, "JTAGX"),
605 PINCTRL_PIN(69, "CX_PRDY_B"),
606 PINCTRL_PIN(70, "TDO"),
607 PINCTRL_PIN(71, "CNV_BRI_DT"),
608 PINCTRL_PIN(72, "CNV_BRI_RSP"),
609 PINCTRL_PIN(73, "CNV_RGI_DT"),
610 PINCTRL_PIN(74, "CNV_RGI_RSP"),
611 PINCTRL_PIN(75, "SVID0_ALERT_B"),
612 PINCTRL_PIN(76, "SVID0_DATA"),
613 PINCTRL_PIN(77, "SVID0_CLK"),
614};
615
616static const unsigned apl_north_pwm0_pins[] = { 34 };
617static const unsigned apl_north_pwm1_pins[] = { 35 };
618static const unsigned apl_north_pwm2_pins[] = { 36 };
619static const unsigned apl_north_pwm3_pins[] = { 37 };
620static const unsigned apl_north_uart0_pins[] = { 38, 39, 40, 41 };
621static const unsigned apl_north_uart1_pins[] = { 42, 43, 44, 45 };
622static const unsigned apl_north_uart2_pins[] = { 46, 47, 48, 49 };
623
624static const struct intel_pingroup apl_north_groups[] = {
625 PIN_GROUP("pwm0_grp", apl_north_pwm0_pins, 1),
626 PIN_GROUP("pwm1_grp", apl_north_pwm1_pins, 1),
627 PIN_GROUP("pwm2_grp", apl_north_pwm2_pins, 1),
628 PIN_GROUP("pwm3_grp", apl_north_pwm3_pins, 1),
629 PIN_GROUP("uart0_grp", apl_north_uart0_pins, 1),
630 PIN_GROUP("uart1_grp", apl_north_uart1_pins, 1),
631 PIN_GROUP("uart2_grp", apl_north_uart2_pins, 1),
632};
633
634static const char * const apl_north_pwm0_groups[] = { "pwm0_grp" };
635static const char * const apl_north_pwm1_groups[] = { "pwm1_grp" };
636static const char * const apl_north_pwm2_groups[] = { "pwm2_grp" };
637static const char * const apl_north_pwm3_groups[] = { "pwm3_grp" };
638static const char * const apl_north_uart0_groups[] = { "uart0_grp" };
639static const char * const apl_north_uart1_groups[] = { "uart1_grp" };
640static const char * const apl_north_uart2_groups[] = { "uart2_grp" };
641
642static const struct intel_function apl_north_functions[] = {
643 FUNCTION("pwm0", apl_north_pwm0_groups),
644 FUNCTION("pwm1", apl_north_pwm1_groups),
645 FUNCTION("pwm2", apl_north_pwm2_groups),
646 FUNCTION("pwm3", apl_north_pwm3_groups),
647 FUNCTION("uart0", apl_north_uart0_groups),
648 FUNCTION("uart1", apl_north_uart1_groups),
649 FUNCTION("uart2", apl_north_uart2_groups),
650};
651
652static const struct intel_community apl_north_communities[] = {
653 BXT_COMMUNITY(0, 77),
654};
655
656static const struct intel_pinctrl_soc_data apl_north_soc_data = {
657 .uid = "1",
658 .pins = apl_north_pins,
659 .npins = ARRAY_SIZE(apl_north_pins),
660 .groups = apl_north_groups,
661 .ngroups = ARRAY_SIZE(apl_north_groups),
662 .functions = apl_north_functions,
663 .nfunctions = ARRAY_SIZE(apl_north_functions),
664 .communities = apl_north_communities,
665 .ncommunities = ARRAY_SIZE(apl_north_communities),
666};
667
668static const struct pinctrl_pin_desc apl_northwest_pins[] = {
669 PINCTRL_PIN(0, "HV_DDI0_DDC_SDA"),
670 PINCTRL_PIN(1, "HV_DDI0_DDC_SCL"),
671 PINCTRL_PIN(2, "HV_DDI1_DDC_SDA"),
672 PINCTRL_PIN(3, "HV_DDI1_DDC_SCL"),
673 PINCTRL_PIN(4, "DBI_SDA"),
674 PINCTRL_PIN(5, "DBI_SCL"),
675 PINCTRL_PIN(6, "PANEL0_VDDEN"),
676 PINCTRL_PIN(7, "PANEL0_BKLTEN"),
677 PINCTRL_PIN(8, "PANEL0_BKLTCTL"),
678 PINCTRL_PIN(9, "PANEL1_VDDEN"),
679 PINCTRL_PIN(10, "PANEL1_BKLTEN"),
680 PINCTRL_PIN(11, "PANEL1_BKLTCTL"),
681 PINCTRL_PIN(12, "DBI_CSX"),
682 PINCTRL_PIN(13, "DBI_RESX"),
683 PINCTRL_PIN(14, "GP_INTD_DSI_TE1"),
684 PINCTRL_PIN(15, "GP_INTD_DSI_TE2"),
685 PINCTRL_PIN(16, "USB_OC0_B"),
686 PINCTRL_PIN(17, "USB_OC1_B"),
687 PINCTRL_PIN(18, "PMC_SPI_FS0"),
688 PINCTRL_PIN(19, "PMC_SPI_FS1"),
689 PINCTRL_PIN(20, "PMC_SPI_FS2"),
690 PINCTRL_PIN(21, "PMC_SPI_RXD"),
691 PINCTRL_PIN(22, "PMC_SPI_TXD"),
692 PINCTRL_PIN(23, "PMC_SPI_CLK"),
693 PINCTRL_PIN(24, "PMIC_PWRGOOD"),
694 PINCTRL_PIN(25, "PMIC_RESET_B"),
695 PINCTRL_PIN(26, "PMIC_SDWN_B"),
696 PINCTRL_PIN(27, "PMIC_BCUDISW2"),
697 PINCTRL_PIN(28, "PMIC_BCUDISCRIT"),
698 PINCTRL_PIN(29, "PMIC_THERMTRIP_B"),
699 PINCTRL_PIN(30, "PMIC_STDBY"),
700 PINCTRL_PIN(31, "PROCHOT_B"),
701 PINCTRL_PIN(32, "PMIC_I2C_SCL"),
702 PINCTRL_PIN(33, "PMIC_I2C_SDA"),
703 PINCTRL_PIN(34, "AVS_I2S1_MCLK"),
704 PINCTRL_PIN(35, "AVS_I2S1_BCLK"),
705 PINCTRL_PIN(36, "AVS_I2S1_WS_SYNC"),
706 PINCTRL_PIN(37, "AVS_I2S1_SDI"),
707 PINCTRL_PIN(38, "AVS_I2S1_SDO"),
708 PINCTRL_PIN(39, "AVS_M_CLK_A1"),
709 PINCTRL_PIN(40, "AVS_M_CLK_B1"),
710 PINCTRL_PIN(41, "AVS_M_DATA_1"),
711 PINCTRL_PIN(42, "AVS_M_CLK_AB2"),
712 PINCTRL_PIN(43, "AVS_M_DATA_2"),
713 PINCTRL_PIN(44, "AVS_I2S2_MCLK"),
714 PINCTRL_PIN(45, "AVS_I2S2_BCLK"),
715 PINCTRL_PIN(46, "AVS_I2S2_WS_SYNC"),
716 PINCTRL_PIN(47, "AVS_I2S2_SDI"),
717 PINCTRL_PIN(48, "AVS_I2S2_SDO"),
718 PINCTRL_PIN(49, "AVS_I2S3_BCLK"),
719 PINCTRL_PIN(50, "AVS_I2S3_WS_SYNC"),
720 PINCTRL_PIN(51, "AVS_I2S3_SDI"),
721 PINCTRL_PIN(52, "AVS_I2S3_SDO"),
722 PINCTRL_PIN(53, "FST_SPI_CS0_B"),
723 PINCTRL_PIN(54, "FST_SPI_CS1_B"),
724 PINCTRL_PIN(55, "FST_SPI_MOSI_IO0"),
725 PINCTRL_PIN(56, "FST_SPI_MISO_IO1"),
726 PINCTRL_PIN(57, "FST_SPI_IO2"),
727 PINCTRL_PIN(58, "FST_SPI_IO3"),
728 PINCTRL_PIN(59, "FST_SPI_CLK"),
729 PINCTRL_PIN(60, "FST_SPI_CLK_FB"),
730 PINCTRL_PIN(61, "GP_SSP_0_CLK"),
731 PINCTRL_PIN(62, "GP_SSP_0_FS0"),
732 PINCTRL_PIN(63, "GP_SSP_0_FS1"),
733 PINCTRL_PIN(64, "GP_SSP_0_RXD"),
734 PINCTRL_PIN(65, "GP_SSP_0_TXD"),
735 PINCTRL_PIN(66, "GP_SSP_1_CLK"),
736 PINCTRL_PIN(67, "GP_SSP_1_FS0"),
737 PINCTRL_PIN(68, "GP_SSP_1_FS1"),
738 PINCTRL_PIN(69, "GP_SSP_1_RXD"),
739 PINCTRL_PIN(70, "GP_SSP_1_TXD"),
740 PINCTRL_PIN(71, "GP_SSP_2_CLK"),
741 PINCTRL_PIN(72, "GP_SSP_2_FS0"),
742 PINCTRL_PIN(73, "GP_SSP_2_FS1"),
743 PINCTRL_PIN(74, "GP_SSP_2_FS2"),
744 PINCTRL_PIN(75, "GP_SSP_2_RXD"),
745 PINCTRL_PIN(76, "GP_SSP_2_TXD"),
746};
747
748static const unsigned apl_northwest_ssp0_pins[] = { 61, 62, 63, 64, 65 };
749static const unsigned apl_northwest_ssp1_pins[] = { 66, 67, 68, 69, 70 };
750static const unsigned apl_northwest_ssp2_pins[] = { 71, 72, 73, 74, 75, 76 };
751static const unsigned apl_northwest_uart3_pins[] = { 67, 68, 69, 70 };
752
753static const struct intel_pingroup apl_northwest_groups[] = {
754 PIN_GROUP("ssp0_grp", apl_northwest_ssp0_pins, 1),
755 PIN_GROUP("ssp1_grp", apl_northwest_ssp1_pins, 1),
756 PIN_GROUP("ssp2_grp", apl_northwest_ssp2_pins, 1),
757 PIN_GROUP("uart3_grp", apl_northwest_uart3_pins, 2),
758};
759
760static const char * const apl_northwest_ssp0_groups[] = { "ssp0_grp" };
761static const char * const apl_northwest_ssp1_groups[] = { "ssp1_grp" };
762static const char * const apl_northwest_ssp2_groups[] = { "ssp2_grp" };
763static const char * const apl_northwest_uart3_groups[] = { "uart3_grp" };
764
765static const struct intel_function apl_northwest_functions[] = {
766 FUNCTION("ssp0", apl_northwest_ssp0_groups),
767 FUNCTION("ssp1", apl_northwest_ssp1_groups),
768 FUNCTION("ssp2", apl_northwest_ssp2_groups),
769 FUNCTION("uart3", apl_northwest_uart3_groups),
770};
771
772static const struct intel_community apl_northwest_communities[] = {
773 BXT_COMMUNITY(0, 76),
774};
775
776static const struct intel_pinctrl_soc_data apl_northwest_soc_data = {
777 .uid = "2",
778 .pins = apl_northwest_pins,
779 .npins = ARRAY_SIZE(apl_northwest_pins),
780 .groups = apl_northwest_groups,
781 .ngroups = ARRAY_SIZE(apl_northwest_groups),
782 .functions = apl_northwest_functions,
783 .nfunctions = ARRAY_SIZE(apl_northwest_functions),
784 .communities = apl_northwest_communities,
785 .ncommunities = ARRAY_SIZE(apl_northwest_communities),
786};
787
788static const struct pinctrl_pin_desc apl_west_pins[] = {
789 PINCTRL_PIN(0, "LPSS_I2C0_SDA"),
790 PINCTRL_PIN(1, "LPSS_I2C0_SCL"),
791 PINCTRL_PIN(2, "LPSS_I2C1_SDA"),
792 PINCTRL_PIN(3, "LPSS_I2C1_SCL"),
793 PINCTRL_PIN(4, "LPSS_I2C2_SDA"),
794 PINCTRL_PIN(5, "LPSS_I2C2_SCL"),
795 PINCTRL_PIN(6, "LPSS_I2C3_SDA"),
796 PINCTRL_PIN(7, "LPSS_I2C3_SCL"),
797 PINCTRL_PIN(8, "LPSS_I2C4_SDA"),
798 PINCTRL_PIN(9, "LPSS_I2C4_SCL"),
799 PINCTRL_PIN(10, "LPSS_I2C5_SDA"),
800 PINCTRL_PIN(11, "LPSS_I2C5_SCL"),
801 PINCTRL_PIN(12, "LPSS_I2C6_SDA"),
802 PINCTRL_PIN(13, "LPSS_I2C6_SCL"),
803 PINCTRL_PIN(14, "LPSS_I2C7_SDA"),
804 PINCTRL_PIN(15, "LPSS_I2C7_SCL"),
805 PINCTRL_PIN(16, "ISH_GPIO_0"),
806 PINCTRL_PIN(17, "ISH_GPIO_1"),
807 PINCTRL_PIN(18, "ISH_GPIO_2"),
808 PINCTRL_PIN(19, "ISH_GPIO_3"),
809 PINCTRL_PIN(20, "ISH_GPIO_4"),
810 PINCTRL_PIN(21, "ISH_GPIO_5"),
811 PINCTRL_PIN(22, "ISH_GPIO_6"),
812 PINCTRL_PIN(23, "ISH_GPIO_7"),
813 PINCTRL_PIN(24, "ISH_GPIO_8"),
814 PINCTRL_PIN(25, "ISH_GPIO_9"),
815 PINCTRL_PIN(26, "PCIE_CLKREQ0_B"),
816 PINCTRL_PIN(27, "PCIE_CLKREQ1_B"),
817 PINCTRL_PIN(28, "PCIE_CLKREQ2_B"),
818 PINCTRL_PIN(29, "PCIE_CLKREQ3_B"),
819 PINCTRL_PIN(30, "OSC_CLK_OUT_0"),
820 PINCTRL_PIN(31, "OSC_CLK_OUT_1"),
821 PINCTRL_PIN(32, "OSC_CLK_OUT_2"),
822 PINCTRL_PIN(33, "OSC_CLK_OUT_3"),
823 PINCTRL_PIN(34, "OSC_CLK_OUT_4"),
824 PINCTRL_PIN(35, "PMU_AC_PRESENT"),
825 PINCTRL_PIN(36, "PMU_BATLOW_B"),
826 PINCTRL_PIN(37, "PMU_PLTRST_B"),
827 PINCTRL_PIN(38, "PMU_PWRBTN_B"),
828 PINCTRL_PIN(39, "PMU_RESETBUTTON_B"),
829 PINCTRL_PIN(40, "PMU_SLP_S0_B"),
830 PINCTRL_PIN(41, "PMU_SLP_S3_B"),
831 PINCTRL_PIN(42, "PMU_SLP_S4_B"),
832 PINCTRL_PIN(43, "PMU_SUSCLK"),
833 PINCTRL_PIN(44, "PMU_WAKE_B"),
834 PINCTRL_PIN(45, "SUS_STAT_B"),
835 PINCTRL_PIN(46, "SUSPWRDNACK"),
836};
837
838static const unsigned apl_west_i2c0_pins[] = { 0, 1 };
839static const unsigned apl_west_i2c1_pins[] = { 2, 3 };
840static const unsigned apl_west_i2c2_pins[] = { 4, 5 };
841static const unsigned apl_west_i2c3_pins[] = { 6, 7 };
842static const unsigned apl_west_i2c4_pins[] = { 8, 9 };
843static const unsigned apl_west_i2c5_pins[] = { 10, 11 };
844static const unsigned apl_west_i2c6_pins[] = { 12, 13 };
845static const unsigned apl_west_i2c7_pins[] = { 14, 15 };
846static const unsigned apl_west_uart2_pins[] = { 20, 21, 22, 34 };
847
848static const struct intel_pingroup apl_west_groups[] = {
849 PIN_GROUP("i2c0_grp", apl_west_i2c0_pins, 1),
850 PIN_GROUP("i2c1_grp", apl_west_i2c1_pins, 1),
851 PIN_GROUP("i2c2_grp", apl_west_i2c2_pins, 1),
852 PIN_GROUP("i2c3_grp", apl_west_i2c3_pins, 1),
853 PIN_GROUP("i2c4_grp", apl_west_i2c4_pins, 1),
854 PIN_GROUP("i2c5_grp", apl_west_i2c5_pins, 1),
855 PIN_GROUP("i2c6_grp", apl_west_i2c6_pins, 1),
856 PIN_GROUP("i2c7_grp", apl_west_i2c7_pins, 1),
857 PIN_GROUP("uart2_grp", apl_west_uart2_pins, 3),
858};
859
860static const char * const apl_west_i2c0_groups[] = { "i2c0_grp" };
861static const char * const apl_west_i2c1_groups[] = { "i2c1_grp" };
862static const char * const apl_west_i2c2_groups[] = { "i2c2_grp" };
863static const char * const apl_west_i2c3_groups[] = { "i2c3_grp" };
864static const char * const apl_west_i2c4_groups[] = { "i2c4_grp" };
865static const char * const apl_west_i2c5_groups[] = { "i2c5_grp" };
866static const char * const apl_west_i2c6_groups[] = { "i2c6_grp" };
867static const char * const apl_west_i2c7_groups[] = { "i2c7_grp" };
868static const char * const apl_west_uart2_groups[] = { "uart2_grp" };
869
870static const struct intel_function apl_west_functions[] = {
871 FUNCTION("i2c0", apl_west_i2c0_groups),
872 FUNCTION("i2c1", apl_west_i2c1_groups),
873 FUNCTION("i2c2", apl_west_i2c2_groups),
874 FUNCTION("i2c3", apl_west_i2c3_groups),
875 FUNCTION("i2c4", apl_west_i2c4_groups),
876 FUNCTION("i2c5", apl_west_i2c5_groups),
877 FUNCTION("i2c6", apl_west_i2c6_groups),
878 FUNCTION("i2c7", apl_west_i2c7_groups),
879 FUNCTION("uart2", apl_west_uart2_groups),
880};
881
882static const struct intel_community apl_west_communities[] = {
883 BXT_COMMUNITY(0, 46),
884};
885
886static const struct intel_pinctrl_soc_data apl_west_soc_data = {
887 .uid = "3",
888 .pins = apl_west_pins,
889 .npins = ARRAY_SIZE(apl_west_pins),
890 .groups = apl_west_groups,
891 .ngroups = ARRAY_SIZE(apl_west_groups),
892 .functions = apl_west_functions,
893 .nfunctions = ARRAY_SIZE(apl_west_functions),
894 .communities = apl_west_communities,
895 .ncommunities = ARRAY_SIZE(apl_west_communities),
896};
897
898static const struct pinctrl_pin_desc apl_southwest_pins[] = {
899 PINCTRL_PIN(0, "PCIE_WAKE0_B"),
900 PINCTRL_PIN(1, "PCIE_WAKE1_B"),
901 PINCTRL_PIN(2, "PCIE_WAKE2_B"),
902 PINCTRL_PIN(3, "PCIE_WAKE3_B"),
903 PINCTRL_PIN(4, "EMMC0_CLK"),
904 PINCTRL_PIN(5, "EMMC0_D0"),
905 PINCTRL_PIN(6, "EMMC0_D1"),
906 PINCTRL_PIN(7, "EMMC0_D2"),
907 PINCTRL_PIN(8, "EMMC0_D3"),
908 PINCTRL_PIN(9, "EMMC0_D4"),
909 PINCTRL_PIN(10, "EMMC0_D5"),
910 PINCTRL_PIN(11, "EMMC0_D6"),
911 PINCTRL_PIN(12, "EMMC0_D7"),
912 PINCTRL_PIN(13, "EMMC0_CMD"),
913 PINCTRL_PIN(14, "SDIO_CLK"),
914 PINCTRL_PIN(15, "SDIO_D0"),
915 PINCTRL_PIN(16, "SDIO_D1"),
916 PINCTRL_PIN(17, "SDIO_D2"),
917 PINCTRL_PIN(18, "SDIO_D3"),
918 PINCTRL_PIN(19, "SDIO_CMD"),
919 PINCTRL_PIN(20, "SDCARD_CLK"),
920 PINCTRL_PIN(21, "SDCARD_CLK_FB"),
921 PINCTRL_PIN(22, "SDCARD_D0"),
922 PINCTRL_PIN(23, "SDCARD_D1"),
923 PINCTRL_PIN(24, "SDCARD_D2"),
924 PINCTRL_PIN(25, "SDCARD_D3"),
925 PINCTRL_PIN(26, "SDCARD_CD_B"),
926 PINCTRL_PIN(27, "SDCARD_CMD"),
927 PINCTRL_PIN(28, "SDCARD_LVL_WP"),
928 PINCTRL_PIN(29, "EMMC0_STROBE"),
929 PINCTRL_PIN(30, "SDIO_PWR_DOWN_B"),
930 PINCTRL_PIN(31, "SMB_ALERTB"),
931 PINCTRL_PIN(32, "SMB_CLK"),
932 PINCTRL_PIN(33, "SMB_DATA"),
933 PINCTRL_PIN(34, "LPC_ILB_SERIRQ"),
934 PINCTRL_PIN(35, "LPC_CLKOUT0"),
935 PINCTRL_PIN(36, "LPC_CLKOUT1"),
936 PINCTRL_PIN(37, "LPC_AD0"),
937 PINCTRL_PIN(38, "LPC_AD1"),
938 PINCTRL_PIN(39, "LPC_AD2"),
939 PINCTRL_PIN(40, "LPC_AD3"),
940 PINCTRL_PIN(41, "LPC_CLKRUNB"),
941 PINCTRL_PIN(42, "LPC_FRAMEB"),
942};
943
944static const unsigned apl_southwest_emmc0_pins[] = {
945 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 29,
946};
947static const unsigned apl_southwest_sdio_pins[] = {
948 14, 15, 16, 17, 18, 19, 30,
949};
950static const unsigned apl_southwest_sdcard_pins[] = {
951 20, 21, 22, 23, 24, 25, 26, 27, 28,
952};
953static const unsigned apl_southwest_i2c7_pins[] = { 32, 33 };
954
955static const struct intel_pingroup apl_southwest_groups[] = {
956 PIN_GROUP("emmc0_grp", apl_southwest_emmc0_pins, 1),
957 PIN_GROUP("sdio_grp", apl_southwest_sdio_pins, 1),
958 PIN_GROUP("sdcard_grp", apl_southwest_sdcard_pins, 1),
959 PIN_GROUP("i2c7_grp", apl_southwest_i2c7_pins, 2),
960};
961
962static const char * const apl_southwest_emmc0_groups[] = { "emmc0_grp" };
963static const char * const apl_southwest_sdio_groups[] = { "sdio_grp" };
964static const char * const apl_southwest_sdcard_groups[] = { "sdcard_grp" };
965static const char * const apl_southwest_i2c7_groups[] = { "i2c7_grp" };
966
967static const struct intel_function apl_southwest_functions[] = {
968 FUNCTION("emmc0", apl_southwest_emmc0_groups),
969 FUNCTION("sdio", apl_southwest_sdio_groups),
970 FUNCTION("sdcard", apl_southwest_sdcard_groups),
971 FUNCTION("i2c7", apl_southwest_i2c7_groups),
972};
973
974static const struct intel_community apl_southwest_communities[] = {
975 BXT_COMMUNITY(0, 42),
976};
977
978static const struct intel_pinctrl_soc_data apl_southwest_soc_data = {
979 .uid = "4",
980 .pins = apl_southwest_pins,
981 .npins = ARRAY_SIZE(apl_southwest_pins),
982 .groups = apl_southwest_groups,
983 .ngroups = ARRAY_SIZE(apl_southwest_groups),
984 .functions = apl_southwest_functions,
985 .nfunctions = ARRAY_SIZE(apl_southwest_functions),
986 .communities = apl_southwest_communities,
987 .ncommunities = ARRAY_SIZE(apl_southwest_communities),
988};
989
990static const struct intel_pinctrl_soc_data *apl_pinctrl_soc_data[] = {
991 &apl_north_soc_data,
992 &apl_northwest_soc_data,
993 &apl_west_soc_data,
994 &apl_southwest_soc_data,
995 NULL,
996};
997
998static const struct acpi_device_id bxt_pinctrl_acpi_match[] = {
999 { "INT3452", (kernel_ulong_t)apl_pinctrl_soc_data },
1000 { "INT34D1", (kernel_ulong_t)bxt_pinctrl_soc_data },
1001 { }
1002};
1003MODULE_DEVICE_TABLE(acpi, bxt_pinctrl_acpi_match);
1004
1005static int bxt_pinctrl_probe(struct platform_device *pdev)
1006{
1007 const struct intel_pinctrl_soc_data *soc_data = NULL;
1008 const struct intel_pinctrl_soc_data **soc_table;
1009 const struct acpi_device_id *id;
1010 struct acpi_device *adev;
1011 int i;
1012
1013 adev = ACPI_COMPANION(&pdev->dev);
1014 if (!adev)
1015 return -ENODEV;
1016
1017 id = acpi_match_device(bxt_pinctrl_acpi_match, &pdev->dev);
1018 if (!id)
1019 return -ENODEV;
1020
1021 soc_table = (const struct intel_pinctrl_soc_data **)id->driver_data;
1022
1023 for (i = 0; soc_table[i]; i++) {
1024 if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) {
1025 soc_data = soc_table[i];
1026 break;
1027 }
1028 }
1029
1030 if (!soc_data)
1031 return -ENODEV;
1032
1033 return intel_pinctrl_probe(pdev, soc_data);
1034}
1035
1036static const struct dev_pm_ops bxt_pinctrl_pm_ops = {
1037 SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
1038 intel_pinctrl_resume)
1039};
1040
1041static struct platform_driver bxt_pinctrl_driver = {
1042 .probe = bxt_pinctrl_probe,
1043 .remove = intel_pinctrl_remove,
1044 .driver = {
1045 .name = "broxton-pinctrl",
1046 .acpi_match_table = bxt_pinctrl_acpi_match,
1047 .pm = &bxt_pinctrl_pm_ops,
1048 },
1049};
1050
1051static int __init bxt_pinctrl_init(void)
1052{
1053 return platform_driver_register(&bxt_pinctrl_driver);
1054}
1055subsys_initcall(bxt_pinctrl_init);
1056
1057static void __exit bxt_pinctrl_exit(void)
1058{
1059 platform_driver_unregister(&bxt_pinctrl_driver);
1060}
1061module_exit(bxt_pinctrl_exit);
1062
1063MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1064MODULE_DESCRIPTION("Intel Broxton SoC pinctrl/GPIO driver");
1065MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 270c127e03ea..84936bae6e5e 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1149,16 +1149,6 @@ static struct pinctrl_desc chv_pinctrl_desc = {
1149 .owner = THIS_MODULE, 1149 .owner = THIS_MODULE,
1150}; 1150};
1151 1151
1152static int chv_gpio_request(struct gpio_chip *chip, unsigned offset)
1153{
1154 return pinctrl_request_gpio(chip->base + offset);
1155}
1156
1157static void chv_gpio_free(struct gpio_chip *chip, unsigned offset)
1158{
1159 pinctrl_free_gpio(chip->base + offset);
1160}
1161
1162static unsigned chv_gpio_offset_to_pin(struct chv_pinctrl *pctrl, 1152static unsigned chv_gpio_offset_to_pin(struct chv_pinctrl *pctrl,
1163 unsigned offset) 1153 unsigned offset)
1164{ 1154{
@@ -1238,8 +1228,8 @@ static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
1238 1228
1239static const struct gpio_chip chv_gpio_chip = { 1229static const struct gpio_chip chv_gpio_chip = {
1240 .owner = THIS_MODULE, 1230 .owner = THIS_MODULE,
1241 .request = chv_gpio_request, 1231 .request = gpiochip_generic_request,
1242 .free = chv_gpio_free, 1232 .free = gpiochip_generic_free,
1243 .get_direction = chv_gpio_get_direction, 1233 .get_direction = chv_gpio_get_direction,
1244 .direction_input = chv_gpio_direction_input, 1234 .direction_input = chv_gpio_direction_input,
1245 .direction_output = chv_gpio_direction_output, 1235 .direction_output = chv_gpio_direction_output,
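For reference, the gpiochip_generic_request()/gpiochip_generic_free() helpers that these drivers switch to simply forward the request to pinctrl, just like the per-driver wrappers being deleted. A minimal sketch of what they boil down to (not the gpiolib source itself, shown only for illustration):

int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
	/* delegate pin muxing to the pinctrl subsystem, as the removed
	 * chv_gpio_request()-style wrappers did */
	return pinctrl_request_gpio(chip->base + offset);
}

void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
	/* release the pin back to pinctrl */
	pinctrl_free_gpio(chip->base + offset);
}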
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 54848b8decef..392e28d3f48d 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/interrupt.h>
15#include <linux/acpi.h> 16#include <linux/acpi.h>
16#include <linux/gpio.h> 17#include <linux/gpio.h>
17#include <linux/gpio/driver.h> 18#include <linux/gpio/driver.h>
@@ -159,8 +160,7 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
159 return !(readl(padown) & PADOWN_MASK(padno)); 160 return !(readl(padown) & PADOWN_MASK(padno));
160} 161}
161 162
162static bool intel_pad_reserved_for_acpi(struct intel_pinctrl *pctrl, 163static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
163 unsigned pin)
164{ 164{
165 const struct intel_community *community; 165 const struct intel_community *community;
166 unsigned padno, gpp, offset; 166 unsigned padno, gpp, offset;
@@ -216,7 +216,6 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
216static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned pin) 216static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned pin)
217{ 217{
218 return intel_pad_owned_by_host(pctrl, pin) && 218 return intel_pad_owned_by_host(pctrl, pin) &&
219 !intel_pad_reserved_for_acpi(pctrl, pin) &&
220 !intel_pad_locked(pctrl, pin); 219 !intel_pad_locked(pctrl, pin);
221} 220}
222 221
@@ -269,7 +268,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
269 seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1); 268 seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1);
270 269
271 locked = intel_pad_locked(pctrl, pin); 270 locked = intel_pad_locked(pctrl, pin);
272 acpi = intel_pad_reserved_for_acpi(pctrl, pin); 271 acpi = intel_pad_acpi_mode(pctrl, pin);
273 272
274 if (locked || acpi) { 273 if (locked || acpi) {
275 seq_puts(s, " ["); 274 seq_puts(s, " [");
@@ -597,16 +596,6 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
597 .owner = THIS_MODULE, 596 .owner = THIS_MODULE,
598}; 597};
599 598
600static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
601{
602 return pinctrl_request_gpio(chip->base + offset);
603}
604
605static void intel_gpio_free(struct gpio_chip *chip, unsigned offset)
606{
607 pinctrl_free_gpio(chip->base + offset);
608}
609
610static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) 599static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
611{ 600{
612 struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip); 601 struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
@@ -654,8 +643,8 @@ static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
654 643
655static const struct gpio_chip intel_gpio_chip = { 644static const struct gpio_chip intel_gpio_chip = {
656 .owner = THIS_MODULE, 645 .owner = THIS_MODULE,
657 .request = intel_gpio_request, 646 .request = gpiochip_generic_request,
658 .free = intel_gpio_free, 647 .free = gpiochip_generic_free,
659 .direction_input = intel_gpio_direction_input, 648 .direction_input = intel_gpio_direction_input,
660 .direction_output = intel_gpio_direction_output, 649 .direction_output = intel_gpio_direction_output,
661 .get = intel_gpio_get, 650 .get = intel_gpio_get,
@@ -736,6 +725,16 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
736 if (!reg) 725 if (!reg)
737 return -EINVAL; 726 return -EINVAL;
738 727
728 /*
729 * If the pin is in ACPI mode it is still usable as a GPIO, but it
730 * cannot be used as an IRQ because the GPI_IS status bit will not be
731 * updated by the host controller hardware.
732 */
733 if (intel_pad_acpi_mode(pctrl, pin)) {
734 dev_warn(pctrl->dev, "pin %u cannot be used as IRQ\n", pin);
735 return -EPERM;
736 }
737
739 spin_lock_irqsave(&pctrl->lock, flags); 738 spin_lock_irqsave(&pctrl->lock, flags);
740 739
741 value = readl(reg); 740 value = readl(reg);
@@ -803,9 +802,11 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
803 return 0; 802 return 0;
804} 803}
805 804
806static void intel_gpio_community_irq_handler(struct gpio_chip *gc, 805static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
807 const struct intel_community *community) 806 const struct intel_community *community)
808{ 807{
808 struct gpio_chip *gc = &pctrl->chip;
809 irqreturn_t ret = IRQ_NONE;
809 int gpp; 810 int gpp;
810 811
811 for (gpp = 0; gpp < community->ngpps; gpp++) { 812 for (gpp = 0; gpp < community->ngpps; gpp++) {
@@ -832,24 +833,28 @@ static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
832 irq = irq_find_mapping(gc->irqdomain, 833 irq = irq_find_mapping(gc->irqdomain,
833 community->pin_base + padno); 834 community->pin_base + padno);
834 generic_handle_irq(irq); 835 generic_handle_irq(irq);
836
837 ret |= IRQ_HANDLED;
835 } 838 }
836 } 839 }
840
841 return ret;
837} 842}
838 843
839static void intel_gpio_irq_handler(struct irq_desc *desc) 844static irqreturn_t intel_gpio_irq(int irq, void *data)
840{ 845{
841 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 846 const struct intel_community *community;
842 struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc); 847 struct intel_pinctrl *pctrl = data;
843 struct irq_chip *chip = irq_desc_get_chip(desc); 848 irqreturn_t ret = IRQ_NONE;
844 int i; 849 int i;
845 850
846 chained_irq_enter(chip, desc);
847
848 /* Need to check all communities for pending interrupts */ 851 /* Need to check all communities for pending interrupts */
849 for (i = 0; i < pctrl->ncommunities; i++) 852 for (i = 0; i < pctrl->ncommunities; i++) {
850 intel_gpio_community_irq_handler(gc, &pctrl->communities[i]); 853 community = &pctrl->communities[i];
854 ret |= intel_gpio_community_irq_handler(pctrl, community);
855 }
851 856
852 chained_irq_exit(chip, desc); 857 return ret;
853} 858}
854 859
855static struct irq_chip intel_gpio_irqchip = { 860static struct irq_chip intel_gpio_irqchip = {
@@ -861,26 +866,6 @@ static struct irq_chip intel_gpio_irqchip = {
861 .irq_set_wake = intel_gpio_irq_wake, 866 .irq_set_wake = intel_gpio_irq_wake,
862}; 867};
863 868
864static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
865{
866 size_t i;
867
868 for (i = 0; i < pctrl->ncommunities; i++) {
869 const struct intel_community *community;
870 void __iomem *base;
871 unsigned gpp;
872
873 community = &pctrl->communities[i];
874 base = community->regs;
875
876 for (gpp = 0; gpp < community->ngpps; gpp++) {
877 /* Mask and clear all interrupts */
878 writel(0, base + community->ie_offset + gpp * 4);
879 writel(0xffff, base + GPI_IS + gpp * 4);
880 }
881 }
882}
883
884static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) 869static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
885{ 870{
886 int ret; 871 int ret;
@@ -902,21 +887,36 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
902 0, 0, pctrl->soc->npins); 887 0, 0, pctrl->soc->npins);
903 if (ret) { 888 if (ret) {
904 dev_err(pctrl->dev, "failed to add GPIO pin range\n"); 889 dev_err(pctrl->dev, "failed to add GPIO pin range\n");
905 gpiochip_remove(&pctrl->chip); 890 goto fail;
906 return ret; 891 }
892
893 /*
894 * We need to request the interrupt here (instead of providing chip
895 * to the irq directly) because on some platforms several GPIO
896 * controllers share the same interrupt line.
897 */
898 ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq, IRQF_SHARED,
899 dev_name(pctrl->dev), pctrl);
900 if (ret) {
901 dev_err(pctrl->dev, "failed to request interrupt\n");
902 goto fail;
907 } 903 }
908 904
909 ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0, 905 ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
910 handle_simple_irq, IRQ_TYPE_NONE); 906 handle_simple_irq, IRQ_TYPE_NONE);
911 if (ret) { 907 if (ret) {
912 dev_err(pctrl->dev, "failed to add irqchip\n"); 908 dev_err(pctrl->dev, "failed to add irqchip\n");
913 gpiochip_remove(&pctrl->chip); 909 goto fail;
914 return ret;
915 } 910 }
916 911
917 gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq, 912 gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
918 intel_gpio_irq_handler); 913 NULL);
919 return 0; 914 return 0;
915
916fail:
917 gpiochip_remove(&pctrl->chip);
918
919 return ret;
920} 920}
921 921
922static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl) 922static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
@@ -1087,6 +1087,26 @@ int intel_pinctrl_suspend(struct device *dev)
1087} 1087}
1088EXPORT_SYMBOL_GPL(intel_pinctrl_suspend); 1088EXPORT_SYMBOL_GPL(intel_pinctrl_suspend);
1089 1089
1090static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
1091{
1092 size_t i;
1093
1094 for (i = 0; i < pctrl->ncommunities; i++) {
1095 const struct intel_community *community;
1096 void __iomem *base;
1097 unsigned gpp;
1098
1099 community = &pctrl->communities[i];
1100 base = community->regs;
1101
1102 for (gpp = 0; gpp < community->ngpps; gpp++) {
1103 /* Mask and clear all interrupts */
1104 writel(0, base + community->ie_offset + gpp * 4);
1105 writel(0xffff, base + GPI_IS + gpp * 4);
1106 }
1107 }
1108}
1109
1090int intel_pinctrl_resume(struct device *dev) 1110int intel_pinctrl_resume(struct device *dev)
1091{ 1111{
1092 struct platform_device *pdev = to_platform_device(dev); 1112 struct platform_device *pdev = to_platform_device(dev);
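The pinctrl-intel change above replaces the chained GPIO interrupt handler with a shared one: the driver requests the line with IRQF_SHARED and returns IRQ_NONE when none of its communities raised the interrupt, so the core keeps calling the other handlers registered on the same line. A minimal sketch of that pattern, with my_ctrl, my_bank_pending() and my_handle_bank() as purely illustrative placeholders:

static irqreturn_t my_gpio_irq(int irq, void *data)
{
	struct my_ctrl *ctrl = data;		/* hypothetical per-controller state */
	irqreturn_t ret = IRQ_NONE;
	int i;

	for (i = 0; i < ctrl->nbanks; i++) {
		if (my_bank_pending(ctrl, i)) {	/* hypothetical: read the bank status register */
			my_handle_bank(ctrl, i);	/* hypothetical: dispatch per-pin virtual irqs */
			ret |= IRQ_HANDLED;
		}
	}

	/* IRQ_NONE tells the core this device did not raise the interrupt,
	 * so the other controllers sharing the line get a chance to run. */
	return ret;
}

/* registration, mirroring intel_gpio_probe() above */
ret = devm_request_irq(dev, irq, my_gpio_irq, IRQF_SHARED, dev_name(dev), ctrl);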
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 1b22f96ba839..f307f1d27d64 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -723,16 +723,6 @@ static const struct pinmux_ops mtk_pmx_ops = {
723 .gpio_set_direction = mtk_pmx_gpio_set_direction, 723 .gpio_set_direction = mtk_pmx_gpio_set_direction,
724}; 724};
725 725
726static int mtk_gpio_request(struct gpio_chip *chip, unsigned offset)
727{
728 return pinctrl_request_gpio(chip->base + offset);
729}
730
731static void mtk_gpio_free(struct gpio_chip *chip, unsigned offset)
732{
733 pinctrl_free_gpio(chip->base + offset);
734}
735
736static int mtk_gpio_direction_input(struct gpio_chip *chip, 726static int mtk_gpio_direction_input(struct gpio_chip *chip,
737 unsigned offset) 727 unsigned offset)
738{ 728{
@@ -899,7 +889,7 @@ static int mtk_eint_flip_edge(struct mtk_pinctrl *pctl, int hwirq)
899 int start_level, curr_level; 889 int start_level, curr_level;
900 unsigned int reg_offset; 890 unsigned int reg_offset;
901 const struct mtk_eint_offsets *eint_offsets = &(pctl->devdata->eint_offsets); 891 const struct mtk_eint_offsets *eint_offsets = &(pctl->devdata->eint_offsets);
902 u32 mask = 1 << (hwirq & 0x1f); 892 u32 mask = BIT(hwirq & 0x1f);
903 u32 port = (hwirq >> 5) & eint_offsets->port_mask; 893 u32 port = (hwirq >> 5) & eint_offsets->port_mask;
904 void __iomem *reg = pctl->eint_reg_base + (port << 2); 894 void __iomem *reg = pctl->eint_reg_base + (port << 2);
905 const struct mtk_desc_pin *pin; 895 const struct mtk_desc_pin *pin;
@@ -1005,8 +995,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
1005 995
1006static struct gpio_chip mtk_gpio_chip = { 996static struct gpio_chip mtk_gpio_chip = {
1007 .owner = THIS_MODULE, 997 .owner = THIS_MODULE,
1008 .request = mtk_gpio_request, 998 .request = gpiochip_generic_request,
1009 .free = mtk_gpio_free, 999 .free = gpiochip_generic_free,
1010 .direction_input = mtk_gpio_direction_input, 1000 .direction_input = mtk_gpio_direction_input,
1011 .direction_output = mtk_gpio_direction_output, 1001 .direction_output = mtk_gpio_direction_output,
1012 .get = mtk_gpio_get, 1002 .get = mtk_gpio_get,
@@ -1436,7 +1426,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
1436 irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip, 1426 irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
1437 handle_level_irq); 1427 handle_level_irq);
1438 irq_set_chip_data(virq, pctl); 1428 irq_set_chip_data(virq, pctl);
1439 }; 1429 }
1440 1430
1441 irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl); 1431 irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
1442 return 0; 1432 return 0;
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 97681fac082e..b59fbb4b1fb1 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -654,25 +654,11 @@ static inline void abx500_gpio_dbg_show_one(struct seq_file *s,
654#define abx500_gpio_dbg_show NULL 654#define abx500_gpio_dbg_show NULL
655#endif 655#endif
656 656
657static int abx500_gpio_request(struct gpio_chip *chip, unsigned offset)
658{
659 int gpio = chip->base + offset;
660
661 return pinctrl_request_gpio(gpio);
662}
663
664static void abx500_gpio_free(struct gpio_chip *chip, unsigned offset)
665{
666 int gpio = chip->base + offset;
667
668 pinctrl_free_gpio(gpio);
669}
670
671static struct gpio_chip abx500gpio_chip = { 657static struct gpio_chip abx500gpio_chip = {
672 .label = "abx500-gpio", 658 .label = "abx500-gpio",
673 .owner = THIS_MODULE, 659 .owner = THIS_MODULE,
674 .request = abx500_gpio_request, 660 .request = gpiochip_generic_request,
675 .free = abx500_gpio_free, 661 .free = gpiochip_generic_free,
676 .direction_input = abx500_gpio_direction_input, 662 .direction_input = abx500_gpio_direction_input,
677 .get = abx500_gpio_get, 663 .get = abx500_gpio_get,
678 .direction_output = abx500_gpio_direction_output, 664 .direction_output = abx500_gpio_direction_output,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 96cf03908e93..eebfae0c9b7c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -884,24 +884,6 @@ static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
884 884
885/* I/O Functions */ 885/* I/O Functions */
886 886
887static int nmk_gpio_request(struct gpio_chip *chip, unsigned offset)
888{
889 /*
890 * Map back to global GPIO space and request muxing, the direction
891 * parameter does not matter for this controller.
892 */
893 int gpio = chip->base + offset;
894
895 return pinctrl_request_gpio(gpio);
896}
897
898static void nmk_gpio_free(struct gpio_chip *chip, unsigned offset)
899{
900 int gpio = chip->base + offset;
901
902 pinctrl_free_gpio(gpio);
903}
904
905static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset) 887static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
906{ 888{
907 struct nmk_gpio_chip *nmk_chip = 889 struct nmk_gpio_chip *nmk_chip =
@@ -1267,8 +1249,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
1267 spin_lock_init(&nmk_chip->lock); 1249 spin_lock_init(&nmk_chip->lock);
1268 1250
1269 chip = &nmk_chip->chip; 1251 chip = &nmk_chip->chip;
1270 chip->request = nmk_gpio_request; 1252 chip->request = gpiochip_generic_request;
1271 chip->free = nmk_gpio_free; 1253 chip->free = gpiochip_generic_free;
1272 chip->direction_input = nmk_gpio_make_input; 1254 chip->direction_input = nmk_gpio_make_input;
1273 chip->get = nmk_gpio_get_input; 1255 chip->get = nmk_gpio_get_input;
1274 chip->direction_output = nmk_gpio_make_output; 1256 chip->direction_output = nmk_gpio_make_output;
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index e63ad9fbd388..099a3442ff42 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -28,25 +28,25 @@
28 28
29#ifdef CONFIG_DEBUG_FS 29#ifdef CONFIG_DEBUG_FS
30static const struct pin_config_item conf_items[] = { 30static const struct pin_config_item conf_items[] = {
31 PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
31 PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false), 32 PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false),
32 PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false), 33 PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false),
33 PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
34 PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
35 PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false), 34 PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false),
36 PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 35 PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
37 "input bias pull to pin specific state", NULL, false), 36 "input bias pull to pin specific state", NULL, false),
38 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), 37 PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
39 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), 38 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
40 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), 39 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
40 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
41 PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA", true), 41 PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA", true),
42 PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true),
42 PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false), 43 PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
43 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
44 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false), 44 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
45 PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true), 45 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
46 PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
47 PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
48 PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true), 46 PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
49 PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true), 47 PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
48 PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
49 PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
50}; 50};
51 51
52static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev, 52static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
@@ -150,27 +150,28 @@ EXPORT_SYMBOL_GPL(pinconf_generic_dump_config);
150 150
151#ifdef CONFIG_OF 151#ifdef CONFIG_OF
152static const struct pinconf_generic_params dt_params[] = { 152static const struct pinconf_generic_params dt_params[] = {
153 { "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 },
153 { "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 }, 154 { "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 },
154 { "bias-high-impedance", PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0 }, 155 { "bias-high-impedance", PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0 },
155 { "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 },
156 { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, 156 { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
157 { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
158 { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, 157 { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
159 { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, 158 { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
160 { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, 159 { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
161 { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, 160 { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
161 { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
162 { "drive-strength", PIN_CONFIG_DRIVE_STRENGTH, 0 }, 162 { "drive-strength", PIN_CONFIG_DRIVE_STRENGTH, 0 },
163 { "input-enable", PIN_CONFIG_INPUT_ENABLE, 1 }, 163 { "input-debounce", PIN_CONFIG_INPUT_DEBOUNCE, 0 },
164 { "input-disable", PIN_CONFIG_INPUT_ENABLE, 0 }, 164 { "input-disable", PIN_CONFIG_INPUT_ENABLE, 0 },
165 { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, 165 { "input-enable", PIN_CONFIG_INPUT_ENABLE, 1 },
166 { "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 },
166 { "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 }, 167 { "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
167 { "input-debounce", PIN_CONFIG_INPUT_DEBOUNCE, 0 }, 168 { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
168 { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
169 { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
170 { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, 169 { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
171 { "output-low", PIN_CONFIG_OUTPUT, 0, }, 170 { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
172 { "output-high", PIN_CONFIG_OUTPUT, 1, }, 171 { "output-high", PIN_CONFIG_OUTPUT, 1, },
173 { "slew-rate", PIN_CONFIG_SLEW_RATE, 0}, 172 { "output-low", PIN_CONFIG_OUTPUT, 0, },
173 { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
174 { "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
174}; 175};
175 176
176/** 177/**
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 29a7bb17a42f..4dd7722f9935 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -411,7 +411,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
411 const struct pinctrl_map *found = NULL; 411 const struct pinctrl_map *found = NULL;
412 struct pinctrl_dev *pctldev; 412 struct pinctrl_dev *pctldev;
413 struct dbg_cfg *dbg = &pinconf_dbg_conf; 413 struct dbg_cfg *dbg = &pinconf_dbg_conf;
414 int i, j; 414 int i;
415 415
416 mutex_lock(&pinctrl_maps_mutex); 416 mutex_lock(&pinctrl_maps_mutex);
417 417
@@ -424,13 +424,10 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
424 if (strcmp(map->name, dbg->state_name)) 424 if (strcmp(map->name, dbg->state_name))
425 continue; 425 continue;
426 426
427 for (j = 0; j < map->data.configs.num_configs; j++) { 427 if (!strcmp(map->data.configs.group_or_pin, dbg->pin_name)) {
428 if (!strcmp(map->data.configs.group_or_pin, 428 /* We found the right pin */
429 dbg->pin_name)) { 429 found = map;
430 /* We found the right pin / state */ 430 break;
431 found = map;
432 break;
433 }
434 } 431 }
435 } 432 }
436 433
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index f6be68518c87..fd342dffe4dc 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -713,16 +713,6 @@ static struct pinctrl_desc adi_pinmux_desc = {
713 .owner = THIS_MODULE, 713 .owner = THIS_MODULE,
714}; 714};
715 715
716static int adi_gpio_request(struct gpio_chip *chip, unsigned offset)
717{
718 return pinctrl_request_gpio(chip->base + offset);
719}
720
721static void adi_gpio_free(struct gpio_chip *chip, unsigned offset)
722{
723 pinctrl_free_gpio(chip->base + offset);
724}
725
726static int adi_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 716static int adi_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
727{ 717{
728 struct gpio_port *port; 718 struct gpio_port *port;
@@ -994,8 +984,8 @@ static int adi_gpio_probe(struct platform_device *pdev)
994 port->chip.get = adi_gpio_get_value; 984 port->chip.get = adi_gpio_get_value;
995 port->chip.direction_output = adi_gpio_direction_output; 985 port->chip.direction_output = adi_gpio_direction_output;
996 port->chip.set = adi_gpio_set_value; 986 port->chip.set = adi_gpio_set_value;
997 port->chip.request = adi_gpio_request; 987 port->chip.request = gpiochip_generic_request;
998 port->chip.free = adi_gpio_free; 988 port->chip.free = gpiochip_generic_free;
999 port->chip.to_irq = adi_gpio_to_irq; 989 port->chip.to_irq = adi_gpio_to_irq;
1000 if (pdata->port_gpio_base > 0) 990 if (pdata->port_gpio_base > 0)
1001 port->chip.base = pdata->port_gpio_base; 991 port->chip.base = pdata->port_gpio_base;
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 4747e08f5389..56af28b95a44 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -536,21 +536,11 @@ static int as3722_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
536 return as3722_irq_get_virq(as_pci->as3722, offset); 536 return as3722_irq_get_virq(as_pci->as3722, offset);
537} 537}
538 538
539static int as3722_gpio_request(struct gpio_chip *chip, unsigned offset)
540{
541 return pinctrl_request_gpio(chip->base + offset);
542}
543
544static void as3722_gpio_free(struct gpio_chip *chip, unsigned offset)
545{
546 pinctrl_free_gpio(chip->base + offset);
547}
548
549static const struct gpio_chip as3722_gpio_chip = { 539static const struct gpio_chip as3722_gpio_chip = {
550 .label = "as3722-gpio", 540 .label = "as3722-gpio",
551 .owner = THIS_MODULE, 541 .owner = THIS_MODULE,
552 .request = as3722_gpio_request, 542 .request = gpiochip_generic_request,
553 .free = as3722_gpio_free, 543 .free = gpiochip_generic_free,
554 .get = as3722_gpio_get, 544 .get = as3722_gpio_get,
555 .set = as3722_gpio_set, 545 .set = as3722_gpio_set,
556 .direction_input = as3722_gpio_direction_input, 546 .direction_input = as3722_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
new file mode 100644
index 000000000000..33edd07d9149
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -0,0 +1,1094 @@
1/*
2 * Driver for the Atmel PIO4 controller
3 *
4 * Copyright (C) 2015 Atmel,
5 * 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk.h>
18#include <linux/gpio.h>
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/platform_device.h>
24#include <linux/pinctrl/pinconf.h>
25#include <linux/pinctrl/pinconf-generic.h>
26#include <linux/pinctrl/pinctrl.h>
27#include <linux/pinctrl/pinmux.h>
28#include <linux/slab.h>
29#include "core.h"
30#include "pinconf.h"
31#include "pinctrl-utils.h"
32
33/*
34 * Warning:
35 * In order to not introduce confusion between Atmel PIO groups and pinctrl
36 * framework groups, Atmel PIO groups will be called banks, line is kept to
37 * designed the pin id into this bank.
38 */
39
40#define ATMEL_PIO_MSKR 0x0000
41#define ATMEL_PIO_CFGR 0x0004
42#define ATMEL_PIO_CFGR_FUNC_MASK GENMASK(2, 0)
43#define ATMEL_PIO_DIR_MASK BIT(8)
44#define ATMEL_PIO_PUEN_MASK BIT(9)
45#define ATMEL_PIO_PDEN_MASK BIT(10)
46#define ATMEL_PIO_IFEN_MASK BIT(12)
47#define ATMEL_PIO_IFSCEN_MASK BIT(13)
48#define ATMEL_PIO_OPD_MASK BIT(14)
49#define ATMEL_PIO_SCHMITT_MASK BIT(15)
50#define ATMEL_PIO_CFGR_EVTSEL_MASK GENMASK(26, 24)
51#define ATMEL_PIO_CFGR_EVTSEL_FALLING (0 << 24)
52#define ATMEL_PIO_CFGR_EVTSEL_RISING (1 << 24)
53#define ATMEL_PIO_CFGR_EVTSEL_BOTH (2 << 24)
54#define ATMEL_PIO_CFGR_EVTSEL_LOW (3 << 24)
55#define ATMEL_PIO_CFGR_EVTSEL_HIGH (4 << 24)
56#define ATMEL_PIO_PDSR 0x0008
57#define ATMEL_PIO_LOCKSR 0x000C
58#define ATMEL_PIO_SODR 0x0010
59#define ATMEL_PIO_CODR 0x0014
60#define ATMEL_PIO_ODSR 0x0018
61#define ATMEL_PIO_IER 0x0020
62#define ATMEL_PIO_IDR 0x0024
63#define ATMEL_PIO_IMR 0x0028
64#define ATMEL_PIO_ISR 0x002C
65#define ATMEL_PIO_IOFR 0x003C
66
67#define ATMEL_PIO_NPINS_PER_BANK 32
68#define ATMEL_PIO_BANK(pin_id) (pin_id / ATMEL_PIO_NPINS_PER_BANK)
69#define ATMEL_PIO_LINE(pin_id) (pin_id % ATMEL_PIO_NPINS_PER_BANK)
70#define ATMEL_PIO_BANK_OFFSET 0x40
71
72#define ATMEL_GET_PIN_NO(pinfunc) ((pinfunc) & 0xff)
73#define ATMEL_GET_PIN_FUNC(pinfunc) ((pinfunc >> 16) & 0xf)
74#define ATMEL_GET_PIN_IOSET(pinfunc) ((pinfunc >> 20) & 0xf)
75
76struct atmel_pioctrl_data {
77 unsigned nbanks;
78};
79
80struct atmel_group {
81 const char *name;
82 u32 pin;
83};
84
85struct atmel_pin {
86 unsigned pin_id;
87 unsigned mux;
88 unsigned ioset;
89 unsigned bank;
90 unsigned line;
91 const char *device;
92};
93
94/**
95 * struct atmel_pioctrl - Atmel PIO controller (pinmux + gpio)
96 * @reg_base: base address of the controller.
97 * @clk: clock of the controller.
98 * @nbanks: number of PIO groups, it can vary depending on the SoC.
99 * @pinctrl_dev: pinctrl device registered.
100 * @groups: groups table to provide group name and pin in the group to pinctrl.
101 * @group_names: group names table to provide all the group/pin names to
102 * pinctrl or gpio.
103 * @pins: pins table used for both pinctrl and gpio. pin_id, bank and line
104 * fields are set at probe time. Other ones are set when parsing dt
105 * pinctrl.
106 * @npins: number of pins.
107 * @gpio_chip: gpio chip registered.
108 * @irq_domain: irq domain for the gpio controller.
109 * @irqs: table containing the hw irq number of the bank. The index of the
110 * table is the bank id.
111 * @dev: device entry for the Atmel PIO controller.
112 * @node: node of the Atmel PIO controller.
113 */
114struct atmel_pioctrl {
115 void __iomem *reg_base;
116 struct clk *clk;
117 unsigned nbanks;
118 struct pinctrl_dev *pinctrl_dev;
119 struct atmel_group *groups;
120 const char * const *group_names;
121 struct atmel_pin **pins;
122 unsigned npins;
123 struct gpio_chip *gpio_chip;
124 struct irq_domain *irq_domain;
125 int *irqs;
126 unsigned *pm_wakeup_sources;
127 unsigned *pm_suspend_backup;
128 struct device *dev;
129 struct device_node *node;
130};
131
132static const char * const atmel_functions[] = {
133 "GPIO", "A", "B", "C", "D", "E", "F", "G"
134};
135
136/* --- GPIO --- */
137static unsigned int atmel_gpio_read(struct atmel_pioctrl *atmel_pioctrl,
138 unsigned int bank, unsigned int reg)
139{
140 return readl_relaxed(atmel_pioctrl->reg_base
141 + ATMEL_PIO_BANK_OFFSET * bank + reg);
142}
143
144static void atmel_gpio_write(struct atmel_pioctrl *atmel_pioctrl,
145 unsigned int bank, unsigned int reg,
146 unsigned int val)
147{
148 writel_relaxed(val, atmel_pioctrl->reg_base
149 + ATMEL_PIO_BANK_OFFSET * bank + reg);
150}
151
152static void atmel_gpio_irq_ack(struct irq_data *d)
153{
154 /*
155 * Nothing to do, interrupt is cleared when reading the status
156 * register.
157 */
158}
159
160static int atmel_gpio_irq_set_type(struct irq_data *d, unsigned type)
161{
162 struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
163 struct atmel_pin *pin = atmel_pioctrl->pins[d->hwirq];
164 unsigned reg;
165
166 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
167 BIT(pin->line));
168 reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR);
169 reg &= (~ATMEL_PIO_CFGR_EVTSEL_MASK);
170
171 switch (type) {
172 case IRQ_TYPE_EDGE_RISING:
173 irq_set_handler_locked(d, handle_edge_irq);
174 reg |= ATMEL_PIO_CFGR_EVTSEL_RISING;
175 break;
176 case IRQ_TYPE_EDGE_FALLING:
177 irq_set_handler_locked(d, handle_edge_irq);
178 reg |= ATMEL_PIO_CFGR_EVTSEL_FALLING;
179 break;
180 case IRQ_TYPE_EDGE_BOTH:
181 irq_set_handler_locked(d, handle_edge_irq);
182 reg |= ATMEL_PIO_CFGR_EVTSEL_BOTH;
183 break;
184 case IRQ_TYPE_LEVEL_LOW:
185 irq_set_handler_locked(d, handle_level_irq);
186 reg |= ATMEL_PIO_CFGR_EVTSEL_LOW;
187 break;
188 case IRQ_TYPE_LEVEL_HIGH:
189 irq_set_handler_locked(d, handle_level_irq);
190 reg |= ATMEL_PIO_CFGR_EVTSEL_HIGH;
191 break;
192 case IRQ_TYPE_NONE:
193 default:
194 return -EINVAL;
195 }
196
197 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR, reg);
198
199 return 0;
200}
201
202static void atmel_gpio_irq_mask(struct irq_data *d)
203{
204 struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
205 struct atmel_pin *pin = atmel_pioctrl->pins[d->hwirq];
206
207 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_IDR,
208 BIT(pin->line));
209}
210
211static void atmel_gpio_irq_unmask(struct irq_data *d)
212{
213 struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
214 struct atmel_pin *pin = atmel_pioctrl->pins[d->hwirq];
215
216 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_IER,
217 BIT(pin->line));
218}
219
220#ifdef CONFIG_PM_SLEEP
221
222static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
223{
224 struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
225 int bank = ATMEL_PIO_BANK(d->hwirq);
226 int line = ATMEL_PIO_LINE(d->hwirq);
227
228 /* The gpio controller has one interrupt line per bank. */
229 irq_set_irq_wake(atmel_pioctrl->irqs[bank], on);
230
231 if (on)
232 atmel_pioctrl->pm_wakeup_sources[bank] |= BIT(line);
233 else
234 atmel_pioctrl->pm_wakeup_sources[bank] &= ~(BIT(line));
235
236 return 0;
237}
238#else
239#define atmel_gpio_irq_set_wake NULL
240#endif /* CONFIG_PM_SLEEP */
241
242static struct irq_chip atmel_gpio_irq_chip = {
243 .name = "GPIO",
244 .irq_ack = atmel_gpio_irq_ack,
245 .irq_mask = atmel_gpio_irq_mask,
246 .irq_unmask = atmel_gpio_irq_unmask,
247 .irq_set_type = atmel_gpio_irq_set_type,
248 .irq_set_wake = atmel_gpio_irq_set_wake,
249};
250
251static void atmel_gpio_irq_handler(struct irq_desc *desc)
252{
253 unsigned int irq = irq_desc_get_irq(desc);
254 struct atmel_pioctrl *atmel_pioctrl = irq_desc_get_handler_data(desc);
255 struct irq_chip *chip = irq_desc_get_chip(desc);
256 unsigned long isr;
257 int n, bank = -1;
258
259 /* Find the bank from which the irq was received. */
260 for (n = 0; n < atmel_pioctrl->nbanks; n++) {
261 if (atmel_pioctrl->irqs[n] == irq) {
262 bank = n;
263 break;
264 }
265 }
266
267 if (bank < 0) {
268 dev_err(atmel_pioctrl->dev,
269 "no bank associated to irq %u\n", irq);
270 return;
271 }
272
273 chained_irq_enter(chip, desc);
274
275 for (;;) {
276 isr = (unsigned long)atmel_gpio_read(atmel_pioctrl, bank,
277 ATMEL_PIO_ISR);
278 isr &= (unsigned long)atmel_gpio_read(atmel_pioctrl, bank,
279 ATMEL_PIO_IMR);
280 if (!isr)
281 break;
282
283 for_each_set_bit(n, &isr, BITS_PER_LONG)
284 generic_handle_irq(gpio_to_irq(bank *
285 ATMEL_PIO_NPINS_PER_BANK + n));
286 }
287
288 chained_irq_exit(chip, desc);
289}
290
291static int atmel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
292{
293 struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
294 struct atmel_pin *pin = atmel_pioctrl->pins[offset];
295 unsigned reg;
296
297 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
298 BIT(pin->line));
299 reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR);
300 reg &= ~ATMEL_PIO_DIR_MASK;
301 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR, reg);
302
303 return 0;
304}
305
306static int atmel_gpio_get(struct gpio_chip *chip, unsigned offset)
307{
308 struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
309 struct atmel_pin *pin = atmel_pioctrl->pins[offset];
310 unsigned reg;
311
312 reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_PDSR);
313
314 return !!(reg & BIT(pin->line));
315}
316
317static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
318 int value)
319{
320 struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
321 struct atmel_pin *pin = atmel_pioctrl->pins[offset];
322 unsigned reg;
323
324 atmel_gpio_write(atmel_pioctrl, pin->bank,
325 value ? ATMEL_PIO_SODR : ATMEL_PIO_CODR,
326 BIT(pin->line));
327
328 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_MSKR,
329 BIT(pin->line));
330 reg = atmel_gpio_read(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR);
331 reg |= ATMEL_PIO_DIR_MASK;
332 atmel_gpio_write(atmel_pioctrl, pin->bank, ATMEL_PIO_CFGR, reg);
333
334 return 0;
335}
336
337static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
338{
339 struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
340 struct atmel_pin *pin = atmel_pioctrl->pins[offset];
341
342 atmel_gpio_write(atmel_pioctrl, pin->bank,
343 val ? ATMEL_PIO_SODR : ATMEL_PIO_CODR,
344 BIT(pin->line));
345}
346
347static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
348{
349 struct atmel_pioctrl *atmel_pioctrl = dev_get_drvdata(chip->dev);
350
351 return irq_find_mapping(atmel_pioctrl->irq_domain, offset);
352}
353
354static struct gpio_chip atmel_gpio_chip = {
355 .direction_input = atmel_gpio_direction_input,
356 .get = atmel_gpio_get,
357 .direction_output = atmel_gpio_direction_output,
358 .set = atmel_gpio_set,
359 .to_irq = atmel_gpio_to_irq,
360 .base = 0,
361};
362
363/* --- PINCTRL --- */
364static unsigned int atmel_pin_config_read(struct pinctrl_dev *pctldev,
365 unsigned pin_id)
366{
367 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
368 unsigned bank = atmel_pioctrl->pins[pin_id]->bank;
369 unsigned line = atmel_pioctrl->pins[pin_id]->line;
370 void __iomem *addr = atmel_pioctrl->reg_base
371 + bank * ATMEL_PIO_BANK_OFFSET;
372
373 writel_relaxed(BIT(line), addr + ATMEL_PIO_MSKR);
374 /* Have to set MSKR first, to access the right pin CFGR. */
375 wmb();
376
377 return readl_relaxed(addr + ATMEL_PIO_CFGR);
378}
379
380static void atmel_pin_config_write(struct pinctrl_dev *pctldev,
381 unsigned pin_id, u32 conf)
382{
383 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
384 unsigned bank = atmel_pioctrl->pins[pin_id]->bank;
385 unsigned line = atmel_pioctrl->pins[pin_id]->line;
386 void __iomem *addr = atmel_pioctrl->reg_base
387 + bank * ATMEL_PIO_BANK_OFFSET;
388
389 writel_relaxed(BIT(line), addr + ATMEL_PIO_MSKR);
390 /* Have to set MSKR first, to access the right pin CFGR. */
391 wmb();
392 writel_relaxed(conf, addr + ATMEL_PIO_CFGR);
393}
394
395static int atmel_pctl_get_groups_count(struct pinctrl_dev *pctldev)
396{
397 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
398
399 return atmel_pioctrl->npins;
400}
401
402static const char *atmel_pctl_get_group_name(struct pinctrl_dev *pctldev,
403 unsigned selector)
404{
405 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
406
407 return atmel_pioctrl->groups[selector].name;
408}
409
410static int atmel_pctl_get_group_pins(struct pinctrl_dev *pctldev,
411 unsigned selector, const unsigned **pins,
412 unsigned *num_pins)
413{
414 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
415
416 *pins = (unsigned *)&atmel_pioctrl->groups[selector].pin;
417 *num_pins = 1;
418
419 return 0;
420}
421
422struct atmel_group *atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev,
423 unsigned pin)
424{
425 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
426 int i;
427
428 for (i = 0; i < atmel_pioctrl->npins; i++) {
429 struct atmel_group *grp = atmel_pioctrl->groups + i;
430
431 if (grp->pin == pin)
432 return grp;
433 }
434
435 return NULL;
436}
437
438static int atmel_pctl_xlate_pinfunc(struct pinctrl_dev *pctldev,
439 struct device_node *np,
440 u32 pinfunc, const char **grp_name,
441 const char **func_name)
442{
443 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
444 unsigned pin_id, func_id;
445 struct atmel_group *grp;
446
447 pin_id = ATMEL_GET_PIN_NO(pinfunc);
448 func_id = ATMEL_GET_PIN_FUNC(pinfunc);
449
450 if (func_id >= ARRAY_SIZE(atmel_functions))
451 return -EINVAL;
452
453 *func_name = atmel_functions[func_id];
454
455 grp = atmel_pctl_find_group_by_pin(pctldev, pin_id);
456 if (!grp)
457 return -EINVAL;
458 *grp_name = grp->name;
459
460 atmel_pioctrl->pins[pin_id]->mux = func_id;
461 atmel_pioctrl->pins[pin_id]->ioset = ATMEL_GET_PIN_IOSET(pinfunc);
462 /* We want the device name, not the group name. */
463 if (np->parent == atmel_pioctrl->node)
464 atmel_pioctrl->pins[pin_id]->device = np->name;
465 else
466 atmel_pioctrl->pins[pin_id]->device = np->parent->name;
467
468 return 0;
469}
470
471static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
472 struct device_node *np,
473 struct pinctrl_map **map,
474 unsigned *reserved_maps,
475 unsigned *num_maps)
476{
477 unsigned num_pins, num_configs, reserve;
478 unsigned long *configs;
479 struct property *pins;
480 bool has_config = false;
481 u32 pinfunc;
482 int ret, i;
483
484 pins = of_find_property(np, "pinmux", NULL);
485 if (!pins)
486 return -EINVAL;
487
488 ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
489 &num_configs);
490 if (ret < 0) {
491 dev_err(pctldev->dev, "%s: could not parse node property\n",
492 of_node_full_name(np));
493 return ret;
494 }
495
496 if (num_configs)
497 has_config = true;
498
499 num_pins = pins->length / sizeof(u32);
500 if (!num_pins) {
501 dev_err(pctldev->dev, "no pins found in node %s\n",
502 of_node_full_name(np));
503 return -EINVAL;
504 }
505
506 /*
507 * Reserve maps: each pin needs at least a mux map, plus an optional
508 * conf map.
509 */
510 reserve = 1;
511 if (has_config && num_pins >= 1)
512 reserve++;
513 reserve *= num_pins;
514 ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
515 reserve);
516 if (ret < 0)
517 return ret;
518
519 for (i = 0; i < num_pins; i++) {
520 const char *group, *func;
521
522 ret = of_property_read_u32_index(np, "pinmux", i, &pinfunc);
523 if (ret)
524 return ret;
525
526 ret = atmel_pctl_xlate_pinfunc(pctldev, np, pinfunc, &group,
527 &func);
528 if (ret)
529 return ret;
530
531 pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
532 group, func);
533
534 if (has_config) {
535 ret = pinctrl_utils_add_map_configs(pctldev, map,
536 reserved_maps, num_maps, group,
537 configs, num_configs,
538 PIN_MAP_TYPE_CONFIGS_GROUP);
539 if (ret < 0)
540 return ret;
541 }
542 }
543
544 return 0;
545}
546
547static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
548 struct device_node *np_config,
549 struct pinctrl_map **map,
550 unsigned *num_maps)
551{
552 struct device_node *np;
553 unsigned reserved_maps;
554 int ret;
555
556 *map = NULL;
557 *num_maps = 0;
558 reserved_maps = 0;
559
560 /*
561 * If all the pins of a device share the same configuration (or have
562 * none), there is no need for a subnode, so directly parse the node
563 * referenced by the phandle.
564 */
565 ret = atmel_pctl_dt_subnode_to_map(pctldev, np_config, map,
566 &reserved_maps, num_maps);
567 if (ret) {
568 for_each_child_of_node(np_config, np) {
569 ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
570 &reserved_maps, num_maps);
571 if (ret < 0)
572 break;
573 }
574 }
575
576 if (ret < 0) {
577 pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
578 dev_err(pctldev->dev, "can't create maps for node %s\n",
579 np_config->full_name);
580 }
581
582 return ret;
583}
584
585static const struct pinctrl_ops atmel_pctlops = {
586 .get_groups_count = atmel_pctl_get_groups_count,
587 .get_group_name = atmel_pctl_get_group_name,
588 .get_group_pins = atmel_pctl_get_group_pins,
589 .dt_node_to_map = atmel_pctl_dt_node_to_map,
590 .dt_free_map = pinctrl_utils_dt_free_map,
591};
592
593static int atmel_pmx_get_functions_count(struct pinctrl_dev *pctldev)
594{
595 return ARRAY_SIZE(atmel_functions);
596}
597
598static const char *atmel_pmx_get_function_name(struct pinctrl_dev *pctldev,
599 unsigned selector)
600{
601 return atmel_functions[selector];
602}
603
604static int atmel_pmx_get_function_groups(struct pinctrl_dev *pctldev,
605 unsigned selector,
606 const char * const **groups,
607 unsigned * const num_groups)
608{
609 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
610
611 *groups = atmel_pioctrl->group_names;
612 *num_groups = atmel_pioctrl->npins;
613
614 return 0;
615}
616
617static int atmel_pmx_set_mux(struct pinctrl_dev *pctldev,
618 unsigned function,
619 unsigned group)
620{
621 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
622 unsigned pin;
623 u32 conf;
624
625 dev_dbg(pctldev->dev, "enable function %s group %s\n",
626 atmel_functions[function], atmel_pioctrl->groups[group].name);
627
628 pin = atmel_pioctrl->groups[group].pin;
629 conf = atmel_pin_config_read(pctldev, pin);
630 conf &= (~ATMEL_PIO_CFGR_FUNC_MASK);
631 conf |= (function & ATMEL_PIO_CFGR_FUNC_MASK);
632 dev_dbg(pctldev->dev, "pin: %u, conf: 0x%08x\n", pin, conf);
633 atmel_pin_config_write(pctldev, pin, conf);
634
635 return 0;
636}
637
638static const struct pinmux_ops atmel_pmxops = {
639 .get_functions_count = atmel_pmx_get_functions_count,
640 .get_function_name = atmel_pmx_get_function_name,
641 .get_function_groups = atmel_pmx_get_function_groups,
642 .set_mux = atmel_pmx_set_mux,
643};
644
645static int atmel_conf_pin_config_group_get(struct pinctrl_dev *pctldev,
646 unsigned group,
647 unsigned long *config)
648{
649 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
650 unsigned param = pinconf_to_config_param(*config), arg = 0;
651 struct atmel_group *grp = atmel_pioctrl->groups + group;
652 unsigned pin_id = grp->pin;
653 u32 res;
654
655 res = atmel_pin_config_read(pctldev, pin_id);
656
657 switch (param) {
658 case PIN_CONFIG_BIAS_PULL_UP:
659 if (!(res & ATMEL_PIO_PUEN_MASK))
660 return -EINVAL;
661 arg = 1;
662 break;
663 case PIN_CONFIG_BIAS_PULL_DOWN:
664 if ((res & ATMEL_PIO_PUEN_MASK) ||
665 (!(res & ATMEL_PIO_PDEN_MASK)))
666 return -EINVAL;
667 arg = 1;
668 break;
669 case PIN_CONFIG_BIAS_DISABLE:
670 if ((res & ATMEL_PIO_PUEN_MASK) ||
671 ((res & ATMEL_PIO_PDEN_MASK)))
672 return -EINVAL;
673 arg = 1;
674 break;
675 case PIN_CONFIG_DRIVE_OPEN_DRAIN:
676 if (!(res & ATMEL_PIO_OPD_MASK))
677 return -EINVAL;
678 arg = 1;
679 break;
680 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
681 if (!(res & ATMEL_PIO_SCHMITT_MASK))
682 return -EINVAL;
683 arg = 1;
684 break;
685 default:
686 return -ENOTSUPP;
687 }
688
689 *config = pinconf_to_config_packed(param, arg);
690 return 0;
691}
692
693static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
694 unsigned group,
695 unsigned long *configs,
696 unsigned num_configs)
697{
698 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
699 struct atmel_group *grp = atmel_pioctrl->groups + group;
700 unsigned bank, pin, pin_id = grp->pin;
701 u32 mask, conf = 0;
702 int i;
703
704 conf = atmel_pin_config_read(pctldev, pin_id);
705
706 for (i = 0; i < num_configs; i++) {
707 unsigned param = pinconf_to_config_param(configs[i]);
708 unsigned arg = pinconf_to_config_argument(configs[i]);
709
710 dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
711 __func__, pin_id, configs[i]);
712
713 switch (param) {
714 case PIN_CONFIG_BIAS_DISABLE:
715 conf &= (~ATMEL_PIO_PUEN_MASK);
716 conf &= (~ATMEL_PIO_PDEN_MASK);
717 break;
718 case PIN_CONFIG_BIAS_PULL_UP:
719 conf |= ATMEL_PIO_PUEN_MASK;
720 break;
721 case PIN_CONFIG_BIAS_PULL_DOWN:
722 conf |= ATMEL_PIO_PDEN_MASK;
723 break;
724 case PIN_CONFIG_DRIVE_OPEN_DRAIN:
725 if (arg == 0)
726 conf &= (~ATMEL_PIO_OPD_MASK);
727 else
728 conf |= ATMEL_PIO_OPD_MASK;
729 break;
730 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
731 if (arg == 0)
732 conf |= ATMEL_PIO_SCHMITT_MASK;
733 else
734 conf &= (~ATMEL_PIO_SCHMITT_MASK);
735 break;
736 case PIN_CONFIG_INPUT_DEBOUNCE:
737 if (arg == 0) {
738 conf &= (~ATMEL_PIO_IFEN_MASK);
739 conf &= (~ATMEL_PIO_IFSCEN_MASK);
740 } else {
741 /*
742 * We don't care about the debounce value for several reasons:
743 * - we can't have different debounce periods within the same group,
744 * - the register configuring this period is a secure register.
745 * The debouncing filter rejects pulses shorter than half a slow-clock
746 * period.
747 */
748 conf |= ATMEL_PIO_IFEN_MASK;
749 conf |= ATMEL_PIO_IFSCEN_MASK;
750 }
751 break;
752 case PIN_CONFIG_OUTPUT:
753 conf |= ATMEL_PIO_DIR_MASK;
754 bank = ATMEL_PIO_BANK(pin_id);
755 pin = ATMEL_PIO_LINE(pin_id);
756 mask = 1 << pin;
757
758 if (arg == 0) {
759 writel_relaxed(mask, atmel_pioctrl->reg_base +
760 bank * ATMEL_PIO_BANK_OFFSET +
761 ATMEL_PIO_CODR);
762 } else {
763 writel_relaxed(mask, atmel_pioctrl->reg_base +
764 bank * ATMEL_PIO_BANK_OFFSET +
765 ATMEL_PIO_SODR);
766 }
767 break;
768 default:
769 dev_warn(pctldev->dev,
770 "unsupported configuration parameter: %u\n",
771 param);
772 continue;
773 }
774 }
775
776 dev_dbg(pctldev->dev, "%s: reg=0x%08x\n", __func__, conf);
777 atmel_pin_config_write(pctldev, pin_id, conf);
778
779 return 0;
780}
781
782static void atmel_conf_pin_config_dbg_show(struct pinctrl_dev *pctldev,
783 struct seq_file *s, unsigned pin_id)
784{
785 struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
786 u32 conf;
787
788 if (!atmel_pioctrl->pins[pin_id]->device)
789 return;
790
791 if (atmel_pioctrl->pins[pin_id])
792 seq_printf(s, " (%s, ioset %u) ",
793 atmel_pioctrl->pins[pin_id]->device,
794 atmel_pioctrl->pins[pin_id]->ioset);
795
796 conf = atmel_pin_config_read(pctldev, pin_id);
797 if (conf & ATMEL_PIO_PUEN_MASK)
798 seq_printf(s, "%s ", "pull-up");
799 if (conf & ATMEL_PIO_PDEN_MASK)
800 seq_printf(s, "%s ", "pull-down");
801 if (conf & ATMEL_PIO_IFEN_MASK)
802 seq_printf(s, "%s ", "debounce");
803 if (conf & ATMEL_PIO_OPD_MASK)
804 seq_printf(s, "%s ", "open-drain");
805 if (conf & ATMEL_PIO_SCHMITT_MASK)
806 seq_printf(s, "%s ", "schmitt");
807}
808
809static const struct pinconf_ops atmel_confops = {
810 .pin_config_group_get = atmel_conf_pin_config_group_get,
811 .pin_config_group_set = atmel_conf_pin_config_group_set,
812 .pin_config_dbg_show = atmel_conf_pin_config_dbg_show,
813};
814
815static struct pinctrl_desc atmel_pinctrl_desc = {
816 .name = "atmel_pinctrl",
817 .confops = &atmel_confops,
818 .pctlops = &atmel_pctlops,
819 .pmxops = &atmel_pmxops,
820};
821
822static int atmel_pctrl_suspend(struct device *dev)
823{
824 struct platform_device *pdev = to_platform_device(dev);
825 struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
826 int i;
827
828 /*
829 * For each bank, save the IMR so it can be restored later, and disable
830 * all GPIO interrupts except the ones marked as wakeup sources.
831 */
832 for (i = 0; i < atmel_pioctrl->nbanks; i++) {
833 atmel_pioctrl->pm_suspend_backup[i] =
834 atmel_gpio_read(atmel_pioctrl, i, ATMEL_PIO_IMR);
835 atmel_gpio_write(atmel_pioctrl, i, ATMEL_PIO_IDR,
836 ~atmel_pioctrl->pm_wakeup_sources[i]);
837 }
838
839 return 0;
840}
841
842static int atmel_pctrl_resume(struct device *dev)
843{
844 struct platform_device *pdev = to_platform_device(dev);
845 struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
846 int i;
847
848 for (i = 0; i < atmel_pioctrl->nbanks; i++)
849 atmel_gpio_write(atmel_pioctrl, i, ATMEL_PIO_IER,
850 atmel_pioctrl->pm_suspend_backup[i]);
851
852 return 0;
853}
854
855static const struct dev_pm_ops atmel_pctrl_pm_ops = {
856 SET_SYSTEM_SLEEP_PM_OPS(atmel_pctrl_suspend, atmel_pctrl_resume)
857};
858
859/*
860 * The number of banks can differ from one SoC to another.
861 * We can have up to 16 banks.
862 */
863static const struct atmel_pioctrl_data atmel_sama5d2_pioctrl_data = {
864 .nbanks = 4,
865};
866
867static const struct of_device_id atmel_pctrl_of_match[] = {
868 {
869 .compatible = "atmel,sama5d2-pinctrl",
870 .data = &atmel_sama5d2_pioctrl_data,
871 }, {
872 /* sentinel */
873 }
874};
875MODULE_DEVICE_TABLE(of, atmel_pctrl_of_match);
876
877static int atmel_pinctrl_probe(struct platform_device *pdev)
878{
879 struct device *dev = &pdev->dev;
880 struct pinctrl_pin_desc *pin_desc;
881 const char **group_names;
882 const struct of_device_id *match;
883 int i, ret;
884 struct resource *res;
885 struct atmel_pioctrl *atmel_pioctrl;
886 struct atmel_pioctrl_data *atmel_pioctrl_data;
887
888 atmel_pioctrl = devm_kzalloc(dev, sizeof(*atmel_pioctrl), GFP_KERNEL);
889 if (!atmel_pioctrl)
890 return -ENOMEM;
891 atmel_pioctrl->dev = dev;
892 atmel_pioctrl->node = dev->of_node;
893 platform_set_drvdata(pdev, atmel_pioctrl);
894
895 match = of_match_node(atmel_pctrl_of_match, dev->of_node);
896 if (!match) {
897 dev_err(dev, "unknown compatible string\n");
898 return -ENODEV;
899 }
900 atmel_pioctrl_data = (struct atmel_pioctrl_data *)match->data;
901 atmel_pioctrl->nbanks = atmel_pioctrl_data->nbanks;
902 atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
903
904 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
905 if (!res) {
906 dev_err(dev, "unable to get atmel pinctrl resource\n");
907 return -EINVAL;
908 }
909 atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
910 if (IS_ERR(atmel_pioctrl->reg_base))
911 return -EINVAL;
912
913 atmel_pioctrl->clk = devm_clk_get(dev, NULL);
914 if (IS_ERR(atmel_pioctrl->clk)) {
915 dev_err(dev, "failed to get clock\n");
916 return PTR_ERR(atmel_pioctrl->clk);
917 }
918
919 atmel_pioctrl->pins = devm_kzalloc(dev, sizeof(*atmel_pioctrl->pins)
920 * atmel_pioctrl->npins, GFP_KERNEL);
921 if (!atmel_pioctrl->pins)
922 return -ENOMEM;
923
924 pin_desc = devm_kzalloc(dev, sizeof(*pin_desc)
925 * atmel_pioctrl->npins, GFP_KERNEL);
926 if (!pin_desc)
927 return -ENOMEM;
928 atmel_pinctrl_desc.pins = pin_desc;
929 atmel_pinctrl_desc.npins = atmel_pioctrl->npins;
930
931 /* One pin per group, since each pin can be muxed to any of its functions. */
932 group_names = devm_kzalloc(dev, sizeof(*group_names)
933 * atmel_pioctrl->npins, GFP_KERNEL);
934 if (!group_names)
935 return -ENOMEM;
936 atmel_pioctrl->group_names = group_names;
937
938 atmel_pioctrl->groups = devm_kzalloc(&pdev->dev,
939 sizeof(*atmel_pioctrl->groups) * atmel_pioctrl->npins,
940 GFP_KERNEL);
941 if (!atmel_pioctrl->groups)
942 return -ENOMEM;
943 for (i = 0 ; i < atmel_pioctrl->npins; i++) {
944 struct atmel_group *group = atmel_pioctrl->groups + i;
945 unsigned bank = ATMEL_PIO_BANK(i);
946 unsigned line = ATMEL_PIO_LINE(i);
947
948 atmel_pioctrl->pins[i] = devm_kzalloc(dev,
949 sizeof(**atmel_pioctrl->pins), GFP_KERNEL);
950 if (!atmel_pioctrl->pins[i])
951 return -ENOMEM;
952
953 atmel_pioctrl->pins[i]->pin_id = i;
954 atmel_pioctrl->pins[i]->bank = bank;
955 atmel_pioctrl->pins[i]->line = line;
956
957 pin_desc[i].number = i;
958 /* Pin naming convention: P(bank_name)(bank_pin_number). */
959 pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d",
960 bank + 'A', line);
961
962 group->name = group_names[i] = pin_desc[i].name;
963 group->pin = pin_desc[i].number;
964
965 dev_dbg(dev, "pin_id=%u, bank=%u, line=%u", i, bank, line);
966 }
967
968 atmel_pioctrl->gpio_chip = &atmel_gpio_chip;
969 atmel_pioctrl->gpio_chip->of_node = dev->of_node;
970 atmel_pioctrl->gpio_chip->ngpio = atmel_pioctrl->npins;
971 atmel_pioctrl->gpio_chip->label = dev_name(dev);
972 atmel_pioctrl->gpio_chip->dev = dev;
973 atmel_pioctrl->gpio_chip->names = atmel_pioctrl->group_names;
974
975 atmel_pioctrl->pm_wakeup_sources = devm_kzalloc(dev,
976 sizeof(*atmel_pioctrl->pm_wakeup_sources)
977 * atmel_pioctrl->nbanks, GFP_KERNEL);
978 if (!atmel_pioctrl->pm_wakeup_sources)
979 return -ENOMEM;
980
981 atmel_pioctrl->pm_suspend_backup = devm_kzalloc(dev,
982 sizeof(*atmel_pioctrl->pm_suspend_backup)
983 * atmel_pioctrl->nbanks, GFP_KERNEL);
984 if (!atmel_pioctrl->pm_suspend_backup)
985 return -ENOMEM;
986
987 atmel_pioctrl->irqs = devm_kzalloc(dev, sizeof(*atmel_pioctrl->irqs)
988 * atmel_pioctrl->nbanks, GFP_KERNEL);
989 if (!atmel_pioctrl->irqs)
990 return -ENOMEM;
991
992 /* There is one controller but each bank has its own irq line. */
993 for (i = 0; i < atmel_pioctrl->nbanks; i++) {
994 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
995 if (!res) {
996 dev_err(dev, "missing irq resource for group %c\n",
997 'A' + i);
998 return -EINVAL;
999 }
1000 atmel_pioctrl->irqs[i] = res->start;
1001 irq_set_chained_handler(res->start, atmel_gpio_irq_handler);
1002 irq_set_handler_data(res->start, atmel_pioctrl);
1003 dev_dbg(dev, "bank %i: hwirq=%u\n", i, res->start);
1004 }
1005
1006 atmel_pioctrl->irq_domain = irq_domain_add_linear(dev->of_node,
1007 atmel_pioctrl->gpio_chip->ngpio,
1008 &irq_domain_simple_ops, NULL);
1009 if (!atmel_pioctrl->irq_domain) {
1010 dev_err(dev, "can't add the irq domain\n");
1011 return -ENODEV;
1012 }
1013 atmel_pioctrl->irq_domain->name = "atmel gpio";
1014
1015 for (i = 0; i < atmel_pioctrl->npins; i++) {
1016 int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
1017
1018 irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
1019 handle_simple_irq);
1020 irq_set_chip_data(irq, atmel_pioctrl);
1021 dev_dbg(dev,
1022 "atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
1023 i, irq);
1024 }
1025
1026 ret = clk_prepare_enable(atmel_pioctrl->clk);
1027 if (ret) {
1028 dev_err(dev, "failed to prepare and enable clock\n");
1029 goto clk_prepare_enable_error;
1030 }
1031
1032 atmel_pioctrl->pinctrl_dev = pinctrl_register(&atmel_pinctrl_desc,
1033 &pdev->dev,
1034 atmel_pioctrl);
1035 if (!atmel_pioctrl->pinctrl_dev) {
1036 dev_err(dev, "pinctrl registration failed\n");
1037 goto pinctrl_register_error;
1038 }
1039
1040 ret = gpiochip_add(atmel_pioctrl->gpio_chip);
1041 if (ret) {
1042 dev_err(dev, "failed to add gpiochip\n");
1043 goto gpiochip_add_error;
1044 }
1045
1046 ret = gpiochip_add_pin_range(atmel_pioctrl->gpio_chip, dev_name(dev),
1047 0, 0, atmel_pioctrl->gpio_chip->ngpio);
1048 if (ret) {
1049 dev_err(dev, "failed to add gpio pin range\n");
1050 goto gpiochip_add_pin_range_error;
1051 }
1052
1053 dev_info(&pdev->dev, "atmel pinctrl initialized\n");
1054
1055 return 0;
1056
1057gpiochip_add_pin_range_error:
1058 gpiochip_remove(atmel_pioctrl->gpio_chip);
1059gpiochip_add_error:
1060 pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
1061pinctrl_register_error:
1062 clk_disable_unprepare(atmel_pioctrl->clk);
1063clk_prepare_enable_error:
1064 irq_domain_remove(atmel_pioctrl->irq_domain);
1065
1066 return ret;
1067}
1068
1069int atmel_pinctrl_remove(struct platform_device *pdev)
1070{
1071 struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
1072
1073 irq_domain_remove(atmel_pioctrl->irq_domain);
1074 clk_disable_unprepare(atmel_pioctrl->clk);
1075 pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
1076 gpiochip_remove(atmel_pioctrl->gpio_chip);
1077
1078 return 0;
1079}
1080
1081static struct platform_driver atmel_pinctrl_driver = {
1082 .driver = {
1083 .name = "pinctrl-at91-pio4",
1084 .of_match_table = atmel_pctrl_of_match,
1085 .pm = &atmel_pctrl_pm_ops,
1086 },
1087 .probe = atmel_pinctrl_probe,
1088 .remove = atmel_pinctrl_remove,
1089};
1090module_platform_driver(atmel_pinctrl_driver);
1091
1092MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
1093MODULE_DESCRIPTION("Atmel PIO4 pinctrl driver");
1094MODULE_LICENSE("GPL v2");
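The group_get/group_set callbacks above exchange configuration with the pinconf core as packed unsigned long values. A minimal sketch of the packing helpers they rely on, assuming the pinconf-generic layout used at the time of this series (parameter in the low 8 bits, argument in the remaining bits; the exact argument width has varied across kernel versions):

/* Sketch only -- the real helpers live in include/linux/pinctrl/pinconf-generic.h. */
#define PIN_CONF_PACKED(param, arg) (((unsigned long)(arg) << 8) | ((param) & 0xffUL))

static inline enum pin_config_param pinconf_to_config_param(unsigned long config)
{
	/* Parameter lives in the low byte of the packed value. */
	return (enum pin_config_param)(config & 0xffUL);
}

static inline unsigned long pinconf_to_config_argument(unsigned long config)
{
	/* Argument occupies the bits above the parameter. */
	return config >> 8;
}

static inline unsigned long pinconf_to_config_packed(enum pin_config_param param,
						     unsigned long arg)
{
	return PIN_CONF_PACKED(param, arg);
}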
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index b0fde0f385e6..0d2fc0cff35e 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1122,8 +1122,10 @@ static int at91_pinctrl_parse_functions(struct device_node *np,
1122 func->groups[i] = child->name; 1122 func->groups[i] = child->name;
1123 grp = &info->groups[grp_index++]; 1123 grp = &info->groups[grp_index++];
1124 ret = at91_pinctrl_parse_groups(child, grp, info, i++); 1124 ret = at91_pinctrl_parse_groups(child, grp, info, i++);
1125 if (ret) 1125 if (ret) {
1126 of_node_put(child);
1126 return ret; 1127 return ret;
1128 }
1127 } 1129 }
1128 1130
1129 return 0; 1131 return 0;
@@ -1196,6 +1198,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
1196 ret = at91_pinctrl_parse_functions(child, info, i++); 1198 ret = at91_pinctrl_parse_functions(child, info, i++);
1197 if (ret) { 1199 if (ret) {
1198 dev_err(&pdev->dev, "failed to parse function\n"); 1200 dev_err(&pdev->dev, "failed to parse function\n");
1201 of_node_put(child);
1199 return ret; 1202 return ret;
1200 } 1203 }
1201 } 1204 }
@@ -1277,28 +1280,6 @@ static int at91_pinctrl_remove(struct platform_device *pdev)
1277 return 0; 1280 return 0;
1278} 1281}
1279 1282
1280static int at91_gpio_request(struct gpio_chip *chip, unsigned offset)
1281{
1282 /*
1283 * Map back to global GPIO space and request muxing, the direction
1284 * parameter does not matter for this controller.
1285 */
1286 int gpio = chip->base + offset;
1287 int bank = chip->base / chip->ngpio;
1288
1289 dev_dbg(chip->dev, "%s:%d pio%c%d(%d)\n", __func__, __LINE__,
1290 'A' + bank, offset, gpio);
1291
1292 return pinctrl_request_gpio(gpio);
1293}
1294
1295static void at91_gpio_free(struct gpio_chip *chip, unsigned offset)
1296{
1297 int gpio = chip->base + offset;
1298
1299 pinctrl_free_gpio(gpio);
1300}
1301
1302static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset) 1283static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
1303{ 1284{
1304 struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip); 1285 struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
@@ -1684,8 +1665,8 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
1684 1665
1685/* This structure is replicated for each GPIO block allocated at probe time */ 1666/* This structure is replicated for each GPIO block allocated at probe time */
1686static struct gpio_chip at91_gpio_template = { 1667static struct gpio_chip at91_gpio_template = {
1687 .request = at91_gpio_request, 1668 .request = gpiochip_generic_request,
1688 .free = at91_gpio_free, 1669 .free = gpiochip_generic_free,
1689 .get_direction = at91_gpio_get_direction, 1670 .get_direction = at91_gpio_get_direction,
1690 .direction_input = at91_gpio_direction_input, 1671 .direction_input = at91_gpio_direction_input,
1691 .get = at91_gpio_get, 1672 .get = at91_gpio_get,
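The same conversion repeats in most of the drivers below: per-driver .request/.free callbacks that merely forwarded to the pinctrl core are dropped in favour of gpiolib's generic helpers. As a rough sketch (assuming the gpiochip_generic_* helpers introduced alongside this series), the replacements do what the removed callbacks did:

int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
	/* Map back to the global GPIO number space and request muxing. */
	return pinctrl_request_gpio(chip->base + offset);
}

void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
	/* Release the pin back to the pinctrl core. */
	pinctrl_free_gpio(chip->base + offset);
}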
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 9c9b88934bcc..813eb7c771ec 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -217,24 +217,6 @@ static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
217 return container_of(chip, struct u300_gpio, chip); 217 return container_of(chip, struct u300_gpio, chip);
218} 218}
219 219
220static int u300_gpio_request(struct gpio_chip *chip, unsigned offset)
221{
222 /*
223 * Map back to global GPIO space and request muxing, the direction
224 * parameter does not matter for this controller.
225 */
226 int gpio = chip->base + offset;
227
228 return pinctrl_request_gpio(gpio);
229}
230
231static void u300_gpio_free(struct gpio_chip *chip, unsigned offset)
232{
233 int gpio = chip->base + offset;
234
235 pinctrl_free_gpio(gpio);
236}
237
238static int u300_gpio_get(struct gpio_chip *chip, unsigned offset) 220static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
239{ 221{
240 struct u300_gpio *gpio = to_u300_gpio(chip); 222 struct u300_gpio *gpio = to_u300_gpio(chip);
@@ -417,8 +399,8 @@ int u300_gpio_config_set(struct gpio_chip *chip, unsigned offset,
417static struct gpio_chip u300_gpio_chip = { 399static struct gpio_chip u300_gpio_chip = {
418 .label = "u300-gpio-chip", 400 .label = "u300-gpio-chip",
419 .owner = THIS_MODULE, 401 .owner = THIS_MODULE,
420 .request = u300_gpio_request, 402 .request = gpiochip_generic_request,
421 .free = u300_gpio_free, 403 .free = gpiochip_generic_free,
422 .get = u300_gpio_get, 404 .get = u300_gpio_get,
423 .set = u300_gpio_set, 405 .set = u300_gpio_set,
424 .direction_input = u300_gpio_direction_input, 406 .direction_input = u300_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 11f8b835d3b6..38a7799f8257 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -169,16 +169,6 @@ static struct pinmux_ops dc_pmxops = {
169 .gpio_request_enable = dc_pmx_request_gpio, 169 .gpio_request_enable = dc_pmx_request_gpio,
170}; 170};
171 171
172static int dc_gpio_request(struct gpio_chip *chip, unsigned gpio)
173{
174 return pinctrl_request_gpio(chip->base + gpio);
175}
176
177static void dc_gpio_free(struct gpio_chip *chip, unsigned gpio)
178{
179 pinctrl_free_gpio(chip->base + gpio);
180}
181
182static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) 172static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
183{ 173{
184 struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip); 174 struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
@@ -255,8 +245,8 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
255 245
256 chip->label = DRIVER_NAME; 246 chip->label = DRIVER_NAME;
257 chip->dev = pmap->dev; 247 chip->dev = pmap->dev;
258 chip->request = dc_gpio_request; 248 chip->request = gpiochip_generic_request;
259 chip->free = dc_gpio_free; 249 chip->free = gpiochip_generic_free;
260 chip->direction_input = dc_gpio_direction_input; 250 chip->direction_input = dc_gpio_direction_input;
261 chip->direction_output = dc_gpio_direction_output; 251 chip->direction_output = dc_gpio_direction_output;
262 chip->get = dc_gpio_get; 252 chip->get = dc_gpio_get;
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 952b1c623887..85c9046c690e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1171,16 +1171,6 @@ static struct pinctrl_desc pistachio_pinctrl_desc = {
1171 .confops = &pistachio_pinconf_ops, 1171 .confops = &pistachio_pinconf_ops,
1172}; 1172};
1173 1173
1174static int pistachio_gpio_request(struct gpio_chip *chip, unsigned offset)
1175{
1176 return pinctrl_request_gpio(chip->base + offset);
1177}
1178
1179static void pistachio_gpio_free(struct gpio_chip *chip, unsigned offset)
1180{
1181 pinctrl_free_gpio(chip->base + offset);
1182}
1183
1184static int pistachio_gpio_get_direction(struct gpio_chip *chip, unsigned offset) 1174static int pistachio_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
1185{ 1175{
1186 struct pistachio_gpio_bank *bank = gc_to_bank(chip); 1176 struct pistachio_gpio_bank *bank = gc_to_bank(chip);
@@ -1332,8 +1322,8 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
1332 .npins = _npins, \ 1322 .npins = _npins, \
1333 .gpio_chip = { \ 1323 .gpio_chip = { \
1334 .label = "GPIO" #_bank, \ 1324 .label = "GPIO" #_bank, \
1335 .request = pistachio_gpio_request, \ 1325 .request = gpiochip_generic_request, \
1336 .free = pistachio_gpio_free, \ 1326 .free = gpiochip_generic_free, \
1337 .get_direction = pistachio_gpio_get_direction, \ 1327 .get_direction = pistachio_gpio_get_direction, \
1338 .direction_input = pistachio_gpio_direction_input, \ 1328 .direction_input = pistachio_gpio_direction_input, \
1339 .direction_output = pistachio_gpio_direction_output, \ 1329 .direction_output = pistachio_gpio_direction_output, \
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 88bb707e107a..a0651128e23a 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1374,16 +1374,6 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
1374 * GPIO handling 1374 * GPIO handling
1375 */ 1375 */
1376 1376
1377static int rockchip_gpio_request(struct gpio_chip *chip, unsigned offset)
1378{
1379 return pinctrl_request_gpio(chip->base + offset);
1380}
1381
1382static void rockchip_gpio_free(struct gpio_chip *chip, unsigned offset)
1383{
1384 pinctrl_free_gpio(chip->base + offset);
1385}
1386
1387static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 1377static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
1388{ 1378{
1389 struct rockchip_pin_bank *bank = gc_to_pin_bank(gc); 1379 struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
@@ -1461,8 +1451,8 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
1461} 1451}
1462 1452
1463static const struct gpio_chip rockchip_gpiolib_chip = { 1453static const struct gpio_chip rockchip_gpiolib_chip = {
1464 .request = rockchip_gpio_request, 1454 .request = gpiochip_generic_request,
1465 .free = rockchip_gpio_free, 1455 .free = gpiochip_generic_free,
1466 .set = rockchip_gpio_set, 1456 .set = rockchip_gpio_set,
1467 .get = rockchip_gpio_get, 1457 .get = rockchip_gpio_get,
1468 .direction_input = rockchip_gpio_direction_input, 1458 .direction_input = rockchip_gpio_direction_input,
@@ -2089,6 +2079,21 @@ static struct rockchip_pin_ctrl rk2928_pin_ctrl = {
2089 .pull_calc_reg = rk2928_calc_pull_reg_and_bit, 2079 .pull_calc_reg = rk2928_calc_pull_reg_and_bit,
2090}; 2080};
2091 2081
2082static struct rockchip_pin_bank rk3036_pin_banks[] = {
2083 PIN_BANK(0, 32, "gpio0"),
2084 PIN_BANK(1, 32, "gpio1"),
2085 PIN_BANK(2, 32, "gpio2"),
2086};
2087
2088static struct rockchip_pin_ctrl rk3036_pin_ctrl = {
2089 .pin_banks = rk3036_pin_banks,
2090 .nr_banks = ARRAY_SIZE(rk3036_pin_banks),
2091 .label = "RK3036-GPIO",
2092 .type = RK2928,
2093 .grf_mux_offset = 0xa8,
2094 .pull_calc_reg = rk2928_calc_pull_reg_and_bit,
2095};
2096
2092static struct rockchip_pin_bank rk3066a_pin_banks[] = { 2097static struct rockchip_pin_bank rk3066a_pin_banks[] = {
2093 PIN_BANK(0, 32, "gpio0"), 2098 PIN_BANK(0, 32, "gpio0"),
2094 PIN_BANK(1, 32, "gpio1"), 2099 PIN_BANK(1, 32, "gpio1"),
@@ -2207,6 +2212,8 @@ static struct rockchip_pin_ctrl rk3368_pin_ctrl = {
2207static const struct of_device_id rockchip_pinctrl_dt_match[] = { 2212static const struct of_device_id rockchip_pinctrl_dt_match[] = {
2208 { .compatible = "rockchip,rk2928-pinctrl", 2213 { .compatible = "rockchip,rk2928-pinctrl",
2209 .data = (void *)&rk2928_pin_ctrl }, 2214 .data = (void *)&rk2928_pin_ctrl },
2215 { .compatible = "rockchip,rk3036-pinctrl",
2216 .data = (void *)&rk3036_pin_ctrl },
2210 { .compatible = "rockchip,rk3066a-pinctrl", 2217 { .compatible = "rockchip,rk3066a-pinctrl",
2211 .data = (void *)&rk3066a_pin_ctrl }, 2218 .data = (void *)&rk3066a_pin_ctrl },
2212 { .compatible = "rockchip,rk3066b-pinctrl", 2219 { .compatible = "rockchip,rk3066b-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 389526e704fb..b58d3f29148a 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -742,16 +742,6 @@ static void st_gpio_direction(struct st_gpio_bank *bank,
742 } 742 }
743} 743}
744 744
745static int st_gpio_request(struct gpio_chip *chip, unsigned offset)
746{
747 return pinctrl_request_gpio(chip->base + offset);
748}
749
750static void st_gpio_free(struct gpio_chip *chip, unsigned offset)
751{
752 pinctrl_free_gpio(chip->base + offset);
753}
754
755static int st_gpio_get(struct gpio_chip *chip, unsigned offset) 745static int st_gpio_get(struct gpio_chip *chip, unsigned offset)
756{ 746{
757 struct st_gpio_bank *bank = gpio_chip_to_bank(chip); 747 struct st_gpio_bank *bank = gpio_chip_to_bank(chip);
@@ -1490,8 +1480,8 @@ static void st_gpio_irqmux_handler(struct irq_desc *desc)
1490} 1480}
1491 1481
1492static struct gpio_chip st_gpio_template = { 1482static struct gpio_chip st_gpio_template = {
1493 .request = st_gpio_request, 1483 .request = gpiochip_generic_request,
1494 .free = st_gpio_free, 1484 .free = gpiochip_generic_free,
1495 .get = st_gpio_get, 1485 .get = st_gpio_get,
1496 .set = st_gpio_set, 1486 .set = st_gpio_set,
1497 .direction_input = st_gpio_direction_input, 1487 .direction_input = st_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c
index 2651d04bd1be..84a43e612952 100644
--- a/drivers/pinctrl/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/pinctrl-tegra-xusb.c
@@ -760,24 +760,15 @@ static const char * const tegra124_pcie_groups[] = {
760 "pcie-2", 760 "pcie-2",
761 "pcie-3", 761 "pcie-3",
762 "pcie-4", 762 "pcie-4",
763 "sata-0",
764}; 763};
765 764
766static const char * const tegra124_usb3_groups[] = { 765static const char * const tegra124_usb3_groups[] = {
767 "pcie-0", 766 "pcie-0",
768 "pcie-1", 767 "pcie-1",
769 "pcie-2",
770 "pcie-3",
771 "pcie-4",
772 "sata-0", 768 "sata-0",
773}; 769};
774 770
775static const char * const tegra124_sata_groups[] = { 771static const char * const tegra124_sata_groups[] = {
776 "pcie-0",
777 "pcie-1",
778 "pcie-2",
779 "pcie-3",
780 "pcie-4",
781 "sata-0", 772 "sata-0",
782}; 773};
783 774
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index c349911708ef..b89ad3c0c731 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -668,7 +668,7 @@ static int tz1090_pdc_pinconf_reg(struct pinctrl_dev *pctldev,
668 break; 668 break;
669 default: 669 default:
670 return -ENOTSUPP; 670 return -ENOTSUPP;
671 }; 671 }
672 672
673 /* Only input bias parameters supported */ 673 /* Only input bias parameters supported */
674 *reg = REG_GPIO_CONTROL2; 674 *reg = REG_GPIO_CONTROL2;
@@ -801,7 +801,7 @@ static int tz1090_pdc_pinconf_group_reg(struct pinctrl_dev *pctldev,
801 break; 801 break;
802 default: 802 default:
803 return -ENOTSUPP; 803 return -ENOTSUPP;
804 }; 804 }
805 805
806 /* Calculate field information */ 806 /* Calculate field information */
807 *mask = (BIT(*width) - 1) << *shift; 807 *mask = (BIT(*width) - 1) << *shift;
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index 6d07a2f64d97..5425299d759d 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1661,7 +1661,7 @@ static int tz1090_pinconf_reg(struct pinctrl_dev *pctldev,
1661 break; 1661 break;
1662 default: 1662 default:
1663 return -ENOTSUPP; 1663 return -ENOTSUPP;
1664 }; 1664 }
1665 1665
1666 /* Only input bias parameters supported */ 1666 /* Only input bias parameters supported */
1667 pu = &tz1090_pinconf_pullup[pin]; 1667 pu = &tz1090_pinconf_pullup[pin];
@@ -1790,7 +1790,7 @@ static int tz1090_pinconf_group_reg(struct pinctrl_dev *pctldev,
1790 break; 1790 break;
1791 default: 1791 default:
1792 return -ENOTSUPP; 1792 return -ENOTSUPP;
1793 }; 1793 }
1794 1794
1795 /* Calculate field information */ 1795 /* Calculate field information */
1796 *shift = g->slw_bit * *width; 1796 *shift = g->slw_bit * *width;
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 779950c62e53..ae724bdab3d3 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -682,28 +682,14 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
682 return 0; 682 return 0;
683} 683}
684 684
685static int xway_gpio_req(struct gpio_chip *chip, unsigned offset)
686{
687 int gpio = chip->base + offset;
688
689 return pinctrl_request_gpio(gpio);
690}
691
692static void xway_gpio_free(struct gpio_chip *chip, unsigned offset)
693{
694 int gpio = chip->base + offset;
695
696 pinctrl_free_gpio(gpio);
697}
698
699static struct gpio_chip xway_chip = { 685static struct gpio_chip xway_chip = {
700 .label = "gpio-xway", 686 .label = "gpio-xway",
701 .direction_input = xway_gpio_dir_in, 687 .direction_input = xway_gpio_dir_in,
702 .direction_output = xway_gpio_dir_out, 688 .direction_output = xway_gpio_dir_out,
703 .get = xway_gpio_get, 689 .get = xway_gpio_get,
704 .set = xway_gpio_set, 690 .set = xway_gpio_set,
705 .request = xway_gpio_req, 691 .request = gpiochip_generic_request,
706 .free = xway_gpio_free, 692 .free = gpiochip_generic_free,
707 .base = -1, 693 .base = -1,
708}; 694};
709 695
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 5aafea8c6590..d57b5eca7b98 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2014 Xilinx 4 * Copyright (C) 2014 Xilinx
5 * 5 *
6 * Sören Brinkmann <soren.brinkmann@xilinx.com> 6 * Sören Brinkmann <soren.brinkmann@xilinx.com>
7 * 7 *
8 * This program is free software: you can redistribute it and/or modify 8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -1230,8 +1230,18 @@ static struct platform_driver zynq_pinctrl_driver = {
1230 .remove = zynq_pinctrl_remove, 1230 .remove = zynq_pinctrl_remove,
1231}; 1231};
1232 1232
1233module_platform_driver(zynq_pinctrl_driver); 1233static int __init zynq_pinctrl_init(void)
1234{
1235 return platform_driver_register(&zynq_pinctrl_driver);
1236}
1237arch_initcall(zynq_pinctrl_init);
1238
1239static void __exit zynq_pinctrl_exit(void)
1240{
1241 platform_driver_unregister(&zynq_pinctrl_driver);
1242}
1243module_exit(zynq_pinctrl_exit);
1234 1244
1235MODULE_AUTHOR("Sören Brinkmann <soren.brinkmann@xilinx.com>"); 1245MODULE_AUTHOR("Sören Brinkmann <soren.brinkmann@xilinx.com>");
1236MODULE_DESCRIPTION("Xilinx Zynq pinctrl driver"); 1246MODULE_DESCRIPTION("Xilinx Zynq pinctrl driver");
1237MODULE_LICENSE("GPL"); 1247MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index a0c7407c1cac..146264a41ec8 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -458,18 +458,6 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
458 spin_unlock_irqrestore(&pctrl->lock, flags); 458 spin_unlock_irqrestore(&pctrl->lock, flags);
459} 459}
460 460
461static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
462{
463 int gpio = chip->base + offset;
464 return pinctrl_request_gpio(gpio);
465}
466
467static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
468{
469 int gpio = chip->base + offset;
470 return pinctrl_free_gpio(gpio);
471}
472
473#ifdef CONFIG_DEBUG_FS 461#ifdef CONFIG_DEBUG_FS
474#include <linux/seq_file.h> 462#include <linux/seq_file.h>
475 463
@@ -527,8 +515,8 @@ static struct gpio_chip msm_gpio_template = {
527 .direction_output = msm_gpio_direction_output, 515 .direction_output = msm_gpio_direction_output,
528 .get = msm_gpio_get, 516 .get = msm_gpio_get,
529 .set = msm_gpio_set, 517 .set = msm_gpio_set,
530 .request = msm_gpio_request, 518 .request = gpiochip_generic_request,
531 .free = msm_gpio_free, 519 .free = gpiochip_generic_free,
532 .dbg_show = msm_gpio_dbg_show, 520 .dbg_show = msm_gpio_dbg_show,
533}; 521};
534 522
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index bd1e24598e12..6c42ca14d2fd 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -546,16 +546,6 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
546 pmic_gpio_config_set(state->ctrl, pin, &config, 1); 546 pmic_gpio_config_set(state->ctrl, pin, &config, 1);
547} 547}
548 548
549static int pmic_gpio_request(struct gpio_chip *chip, unsigned base)
550{
551 return pinctrl_request_gpio(chip->base + base);
552}
553
554static void pmic_gpio_free(struct gpio_chip *chip, unsigned base)
555{
556 pinctrl_free_gpio(chip->base + base);
557}
558
559static int pmic_gpio_of_xlate(struct gpio_chip *chip, 549static int pmic_gpio_of_xlate(struct gpio_chip *chip,
560 const struct of_phandle_args *gpio_desc, 550 const struct of_phandle_args *gpio_desc,
561 u32 *flags) 551 u32 *flags)
@@ -595,8 +585,8 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
595 .direction_output = pmic_gpio_direction_output, 585 .direction_output = pmic_gpio_direction_output,
596 .get = pmic_gpio_get, 586 .get = pmic_gpio_get,
597 .set = pmic_gpio_set, 587 .set = pmic_gpio_set,
598 .request = pmic_gpio_request, 588 .request = gpiochip_generic_request,
599 .free = pmic_gpio_free, 589 .free = gpiochip_generic_free,
600 .of_xlate = pmic_gpio_of_xlate, 590 .of_xlate = pmic_gpio_of_xlate,
601 .to_irq = pmic_gpio_to_irq, 591 .to_irq = pmic_gpio_to_irq,
602 .dbg_show = pmic_gpio_dbg_show, 592 .dbg_show = pmic_gpio_dbg_show,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index e3be3ce2cada..9ce0e30e33e8 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -604,16 +604,6 @@ static void pmic_mpp_set(struct gpio_chip *chip, unsigned pin, int value)
604 pmic_mpp_config_set(state->ctrl, pin, &config, 1); 604 pmic_mpp_config_set(state->ctrl, pin, &config, 1);
605} 605}
606 606
607static int pmic_mpp_request(struct gpio_chip *chip, unsigned base)
608{
609 return pinctrl_request_gpio(chip->base + base);
610}
611
612static void pmic_mpp_free(struct gpio_chip *chip, unsigned base)
613{
614 pinctrl_free_gpio(chip->base + base);
615}
616
617static int pmic_mpp_of_xlate(struct gpio_chip *chip, 607static int pmic_mpp_of_xlate(struct gpio_chip *chip,
618 const struct of_phandle_args *gpio_desc, 608 const struct of_phandle_args *gpio_desc,
619 u32 *flags) 609 u32 *flags)
@@ -653,8 +643,8 @@ static const struct gpio_chip pmic_mpp_gpio_template = {
653 .direction_output = pmic_mpp_direction_output, 643 .direction_output = pmic_mpp_direction_output,
654 .get = pmic_mpp_get, 644 .get = pmic_mpp_get,
655 .set = pmic_mpp_set, 645 .set = pmic_mpp_set,
656 .request = pmic_mpp_request, 646 .request = gpiochip_generic_request,
657 .free = pmic_mpp_free, 647 .free = gpiochip_generic_free,
658 .of_xlate = pmic_mpp_of_xlate, 648 .of_xlate = pmic_mpp_of_xlate,
659 .to_irq = pmic_mpp_to_irq, 649 .to_irq = pmic_mpp_to_irq,
660 .dbg_show = pmic_mpp_dbg_show, 650 .dbg_show = pmic_mpp_dbg_show,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index e1a3721bc8e5..d809c9eaa323 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -584,7 +584,7 @@ static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
584} 584}
585 585
586#else 586#else
587#define msm_gpio_dbg_show NULL 587#define pm8xxx_gpio_dbg_show NULL
588#endif 588#endif
589 589
590static struct gpio_chip pm8xxx_gpio_template = { 590static struct gpio_chip pm8xxx_gpio_template = {
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 6652b8d7f707..8982027de8e8 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -639,7 +639,7 @@ static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
639} 639}
640 640
641#else 641#else
642#define msm_mpp_dbg_show NULL 642#define pm8xxx_mpp_dbg_show NULL
643#endif 643#endif
644 644
645static struct gpio_chip pm8xxx_mpp_template = { 645static struct gpio_chip pm8xxx_mpp_template = {
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index 9ce0b8619d4c..82dc109f7ed4 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -284,7 +284,7 @@ static void exynos5440_dt_free_map(struct pinctrl_dev *pctldev,
284 if (!idx) 284 if (!idx)
285 kfree(map[idx].data.configs.group_or_pin); 285 kfree(map[idx].data.configs.group_or_pin);
286 } 286 }
287 }; 287 }
288 288
289 kfree(map); 289 kfree(map);
290} 290}
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index c760bf43d116..3f622ccd8eab 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -888,19 +888,9 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
888 return 0; 888 return 0;
889} 889}
890 890
891static int samsung_gpio_request(struct gpio_chip *chip, unsigned offset)
892{
893 return pinctrl_request_gpio(chip->base + offset);
894}
895
896static void samsung_gpio_free(struct gpio_chip *chip, unsigned offset)
897{
898 pinctrl_free_gpio(chip->base + offset);
899}
900
901static const struct gpio_chip samsung_gpiolib_chip = { 891static const struct gpio_chip samsung_gpiolib_chip = {
902 .request = samsung_gpio_request, 892 .request = gpiochip_generic_request,
903 .free = samsung_gpio_free, 893 .free = gpiochip_generic_free,
904 .set = samsung_gpio_set, 894 .set = samsung_gpio_set,
905 .get = samsung_gpio_get, 895 .get = samsung_gpio_get,
906 .direction_input = samsung_gpio_direction_input, 896 .direction_input = samsung_gpio_direction_input,
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 8e024c9c9115..35d6e95fa21f 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -65,6 +65,11 @@ config PINCTRL_PFC_R8A7794
65 depends on ARCH_R8A7794 65 depends on ARCH_R8A7794
66 select PINCTRL_SH_PFC 66 select PINCTRL_SH_PFC
67 67
68config PINCTRL_PFC_R8A7795
69 def_bool y
70 depends on ARCH_R8A7795
71 select PINCTRL_SH_PFC
72
68config PINCTRL_PFC_SH7203 73config PINCTRL_PFC_SH7203
69 def_bool y 74 def_bool y
70 depends on CPU_SUBTYPE_SH7203 75 depends on CPU_SUBTYPE_SH7203
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index ea2a60ef122a..173305fa3811 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o
12obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o 12obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o
13obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o 13obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
14obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o 14obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
15obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
15obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o 16obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
16obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o 17obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
17obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o 18obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index fb9c44805234..181ea98a63b7 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -272,7 +272,7 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id,
272static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos, 272static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos,
273 u16 *enum_idp) 273 u16 *enum_idp)
274{ 274{
275 const u16 *data = pfc->info->gpio_data; 275 const u16 *data = pfc->info->pinmux_data;
276 unsigned int k; 276 unsigned int k;
277 277
278 if (pos) { 278 if (pos) {
@@ -280,7 +280,7 @@ static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos,
280 return pos + 1; 280 return pos + 1;
281 } 281 }
282 282
283 for (k = 0; k < pfc->info->gpio_data_size; k++) { 283 for (k = 0; k < pfc->info->pinmux_data_size; k++) {
284 if (data[k] == mark) { 284 if (data[k] == mark) {
285 *enum_idp = data[k + 1]; 285 *enum_idp = data[k + 1];
286 return k + 1; 286 return k + 1;
@@ -489,6 +489,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
489 .data = &r8a7794_pinmux_info, 489 .data = &r8a7794_pinmux_info,
490 }, 490 },
491#endif 491#endif
492#ifdef CONFIG_PINCTRL_PFC_R8A7795
493 {
494 .compatible = "renesas,pfc-r8a7795",
495 .data = &r8a7795_pinmux_info,
496 },
497#endif
492#ifdef CONFIG_PINCTRL_PFC_SH73A0 498#ifdef CONFIG_PINCTRL_PFC_SH73A0
493 { 499 {
494 .compatible = "renesas,pfc-sh73a0", 500 .compatible = "renesas,pfc-sh73a0",
@@ -587,12 +593,6 @@ static int sh_pfc_remove(struct platform_device *pdev)
587} 593}
588 594
589static const struct platform_device_id sh_pfc_id_table[] = { 595static const struct platform_device_id sh_pfc_id_table[] = {
590#ifdef CONFIG_PINCTRL_PFC_R8A7778
591 { "pfc-r8a7778", (kernel_ulong_t)&r8a7778_pinmux_info },
592#endif
593#ifdef CONFIG_PINCTRL_PFC_R8A7779
594 { "pfc-r8a7779", (kernel_ulong_t)&r8a7779_pinmux_info },
595#endif
596#ifdef CONFIG_PINCTRL_PFC_SH7203 596#ifdef CONFIG_PINCTRL_PFC_SH7203
597 { "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info }, 597 { "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info },
598#endif 598#endif
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 4c3c37bf7161..62f53b22ae85 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -46,7 +46,9 @@ struct sh_pfc {
46 unsigned int nr_gpio_pins; 46 unsigned int nr_gpio_pins;
47 47
48 struct sh_pfc_chip *gpio; 48 struct sh_pfc_chip *gpio;
49#ifdef CONFIG_SUPERH
49 struct sh_pfc_chip *func; 50 struct sh_pfc_chip *func;
51#endif
50 52
51 struct sh_pfc_pinctrl *pinctrl; 53 struct sh_pfc_pinctrl *pinctrl;
52}; 54};
@@ -73,6 +75,7 @@ extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
73extern const struct sh_pfc_soc_info r8a7791_pinmux_info; 75extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
74extern const struct sh_pfc_soc_info r8a7793_pinmux_info; 76extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
75extern const struct sh_pfc_soc_info r8a7794_pinmux_info; 77extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
78extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
76extern const struct sh_pfc_soc_info sh7203_pinmux_info; 79extern const struct sh_pfc_soc_info sh7203_pinmux_info;
77extern const struct sh_pfc_soc_info sh7264_pinmux_info; 80extern const struct sh_pfc_soc_info sh7264_pinmux_info;
78extern const struct sh_pfc_soc_info sh7269_pinmux_info; 81extern const struct sh_pfc_soc_info sh7269_pinmux_info;
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index ba353735ecf2..db3f09aa8993 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -219,10 +219,7 @@ static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
219 return -ENOSYS; 219 return -ENOSYS;
220 220
221found: 221found:
222 if (pfc->num_irqs) 222 return pfc->irqs[i];
223 return pfc->irqs[i];
224 else
225 return pfc->info->gpio_irq[i].irq;
226} 223}
227 224
228static int gpio_pin_setup(struct sh_pfc_chip *chip) 225static int gpio_pin_setup(struct sh_pfc_chip *chip)
@@ -261,6 +258,7 @@ static int gpio_pin_setup(struct sh_pfc_chip *chip)
261 * Function GPIOs 258 * Function GPIOs
262 */ 259 */
263 260
261#ifdef CONFIG_SUPERH
264static int gpio_function_request(struct gpio_chip *gc, unsigned offset) 262static int gpio_function_request(struct gpio_chip *gc, unsigned offset)
265{ 263{
266 static bool __print_once; 264 static bool __print_once;
@@ -286,17 +284,12 @@ static int gpio_function_request(struct gpio_chip *gc, unsigned offset)
286 return ret; 284 return ret;
287} 285}
288 286
289static void gpio_function_free(struct gpio_chip *gc, unsigned offset)
290{
291}
292
293static int gpio_function_setup(struct sh_pfc_chip *chip) 287static int gpio_function_setup(struct sh_pfc_chip *chip)
294{ 288{
295 struct sh_pfc *pfc = chip->pfc; 289 struct sh_pfc *pfc = chip->pfc;
296 struct gpio_chip *gc = &chip->gpio_chip; 290 struct gpio_chip *gc = &chip->gpio_chip;
297 291
298 gc->request = gpio_function_request; 292 gc->request = gpio_function_request;
299 gc->free = gpio_function_free;
300 293
301 gc->label = pfc->info->name; 294 gc->label = pfc->info->name;
302 gc->owner = THIS_MODULE; 295 gc->owner = THIS_MODULE;
@@ -305,6 +298,7 @@ static int gpio_function_setup(struct sh_pfc_chip *chip)
305 298
306 return 0; 299 return 0;
307} 300}
301#endif
308 302
309/* ----------------------------------------------------------------------------- 303/* -----------------------------------------------------------------------------
310 * Register/unregister 304 * Register/unregister
@@ -344,7 +338,6 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
344 struct sh_pfc_chip *chip; 338 struct sh_pfc_chip *chip;
345 phys_addr_t address; 339 phys_addr_t address;
346 unsigned int i; 340 unsigned int i;
347 int ret;
348 341
349 if (pfc->info->data_regs == NULL) 342 if (pfc->info->data_regs == NULL)
350 return 0; 343 return 0;
@@ -367,7 +360,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
367 return 0; 360 return 0;
368 361
369 /* If we have IRQ resources make sure their number is correct. */ 362 /* If we have IRQ resources make sure their number is correct. */
370 if (pfc->num_irqs && pfc->num_irqs != pfc->info->gpio_irq_size) { 363 if (pfc->num_irqs != pfc->info->gpio_irq_size) {
371 dev_err(pfc->dev, "invalid number of IRQ resources\n"); 364 dev_err(pfc->dev, "invalid number of IRQ resources\n");
372 return -EINVAL; 365 return -EINVAL;
373 } 366 }
@@ -379,20 +372,26 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
379 372
380 pfc->gpio = chip; 373 pfc->gpio = chip;
381 374
382 /* Register the GPIO to pin mappings. As pins with GPIO ports must come 375 if (IS_ENABLED(CONFIG_OF) && pfc->dev->of_node)
383 * first in the ranges, skip the pins without GPIO ports by stopping at 376 return 0;
384 * the first range that contains such a pin. 377
378#ifdef CONFIG_SUPERH
379 /*
380 * Register the GPIO to pin mappings. As pins with GPIO ports
381 * must come first in the ranges, skip the pins without GPIO
382 * ports by stopping at the first range that contains such a
383 * pin.
385 */ 384 */
386 for (i = 0; i < pfc->nr_ranges; ++i) { 385 for (i = 0; i < pfc->nr_ranges; ++i) {
387 const struct sh_pfc_pin_range *range = &pfc->ranges[i]; 386 const struct sh_pfc_pin_range *range = &pfc->ranges[i];
387 int ret;
388 388
389 if (range->start >= pfc->nr_gpio_pins) 389 if (range->start >= pfc->nr_gpio_pins)
390 break; 390 break;
391 391
392 ret = gpiochip_add_pin_range(&chip->gpio_chip, 392 ret = gpiochip_add_pin_range(&chip->gpio_chip,
393 dev_name(pfc->dev), 393 dev_name(pfc->dev), range->start, range->start,
394 range->start, range->start, 394 range->end - range->start + 1);
395 range->end - range->start + 1);
396 if (ret < 0) 395 if (ret < 0)
397 return ret; 396 return ret;
398 } 397 }
@@ -406,6 +405,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
406 return PTR_ERR(chip); 405 return PTR_ERR(chip);
407 406
408 pfc->func = chip; 407 pfc->func = chip;
408#endif /* CONFIG_SUPERH */
409 409
410 return 0; 410 return 0;
411} 411}
@@ -413,7 +413,8 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
413int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc) 413int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc)
414{ 414{
415 gpiochip_remove(&pfc->gpio->gpio_chip); 415 gpiochip_remove(&pfc->gpio->gpio_chip);
416#ifdef CONFIG_SUPERH
416 gpiochip_remove(&pfc->func->gpio_chip); 417 gpiochip_remove(&pfc->func->gpio_chip);
417 418#endif
418 return 0; 419 return 0;
419} 420}
diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c
index 849c6943ed30..02118ab336fc 100644
--- a/drivers/pinctrl/sh-pfc/pfc-emev2.c
+++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c
@@ -1706,6 +1706,6 @@ const struct sh_pfc_soc_info emev2_pinmux_info = {
1706 1706
1707 .cfg_regs = pinmux_config_regs, 1707 .cfg_regs = pinmux_config_regs,
1708 1708
1709 .gpio_data = pinmux_data, 1709 .pinmux_data = pinmux_data,
1710 .gpio_data_size = ARRAY_SIZE(pinmux_data), 1710 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
1711}; 1711};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index ba18d2e65e67..d9d9228b15fa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -2603,64 +2603,64 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
2603}; 2603};
2604 2604
2605static const struct pinmux_irq pinmux_irqs[] = { 2605static const struct pinmux_irq pinmux_irqs[] = {
2606 PINMUX_IRQ(irq_pin(0), 0), 2606 PINMUX_IRQ(0), /* IRQ0 */
2607 PINMUX_IRQ(irq_pin(1), 1), 2607 PINMUX_IRQ(1), /* IRQ1 */
2608 PINMUX_IRQ(irq_pin(2), 2), 2608 PINMUX_IRQ(2), /* IRQ2 */
2609 PINMUX_IRQ(irq_pin(3), 3), 2609 PINMUX_IRQ(3), /* IRQ3 */
2610 PINMUX_IRQ(irq_pin(4), 4), 2610 PINMUX_IRQ(4), /* IRQ4 */
2611 PINMUX_IRQ(irq_pin(5), 5), 2611 PINMUX_IRQ(5), /* IRQ5 */
2612 PINMUX_IRQ(irq_pin(6), 6), 2612 PINMUX_IRQ(6), /* IRQ6 */
2613 PINMUX_IRQ(irq_pin(7), 7), 2613 PINMUX_IRQ(7), /* IRQ7 */
2614 PINMUX_IRQ(irq_pin(8), 8), 2614 PINMUX_IRQ(8), /* IRQ8 */
2615 PINMUX_IRQ(irq_pin(9), 9), 2615 PINMUX_IRQ(9), /* IRQ9 */
2616 PINMUX_IRQ(irq_pin(10), 10), 2616 PINMUX_IRQ(10), /* IRQ10 */
2617 PINMUX_IRQ(irq_pin(11), 11), 2617 PINMUX_IRQ(11), /* IRQ11 */
2618 PINMUX_IRQ(irq_pin(12), 12), 2618 PINMUX_IRQ(12), /* IRQ12 */
2619 PINMUX_IRQ(irq_pin(13), 13), 2619 PINMUX_IRQ(13), /* IRQ13 */
2620 PINMUX_IRQ(irq_pin(14), 14), 2620 PINMUX_IRQ(14), /* IRQ14 */
2621 PINMUX_IRQ(irq_pin(15), 15), 2621 PINMUX_IRQ(15), /* IRQ15 */
2622 PINMUX_IRQ(irq_pin(16), 320), 2622 PINMUX_IRQ(320), /* IRQ16 */
2623 PINMUX_IRQ(irq_pin(17), 321), 2623 PINMUX_IRQ(321), /* IRQ17 */
2624 PINMUX_IRQ(irq_pin(18), 85), 2624 PINMUX_IRQ(85), /* IRQ18 */
2625 PINMUX_IRQ(irq_pin(19), 84), 2625 PINMUX_IRQ(84), /* IRQ19 */
2626 PINMUX_IRQ(irq_pin(20), 160), 2626 PINMUX_IRQ(160), /* IRQ20 */
2627 PINMUX_IRQ(irq_pin(21), 161), 2627 PINMUX_IRQ(161), /* IRQ21 */
2628 PINMUX_IRQ(irq_pin(22), 162), 2628 PINMUX_IRQ(162), /* IRQ22 */
2629 PINMUX_IRQ(irq_pin(23), 163), 2629 PINMUX_IRQ(163), /* IRQ23 */
2630 PINMUX_IRQ(irq_pin(24), 175), 2630 PINMUX_IRQ(175), /* IRQ24 */
2631 PINMUX_IRQ(irq_pin(25), 176), 2631 PINMUX_IRQ(176), /* IRQ25 */
2632 PINMUX_IRQ(irq_pin(26), 177), 2632 PINMUX_IRQ(177), /* IRQ26 */
2633 PINMUX_IRQ(irq_pin(27), 178), 2633 PINMUX_IRQ(178), /* IRQ27 */
2634 PINMUX_IRQ(irq_pin(28), 322), 2634 PINMUX_IRQ(322), /* IRQ28 */
2635 PINMUX_IRQ(irq_pin(29), 323), 2635 PINMUX_IRQ(323), /* IRQ29 */
2636 PINMUX_IRQ(irq_pin(30), 324), 2636 PINMUX_IRQ(324), /* IRQ30 */
2637 PINMUX_IRQ(irq_pin(31), 192), 2637 PINMUX_IRQ(192), /* IRQ31 */
2638 PINMUX_IRQ(irq_pin(32), 193), 2638 PINMUX_IRQ(193), /* IRQ32 */
2639 PINMUX_IRQ(irq_pin(33), 194), 2639 PINMUX_IRQ(194), /* IRQ33 */
2640 PINMUX_IRQ(irq_pin(34), 195), 2640 PINMUX_IRQ(195), /* IRQ34 */
2641 PINMUX_IRQ(irq_pin(35), 196), 2641 PINMUX_IRQ(196), /* IRQ35 */
2642 PINMUX_IRQ(irq_pin(36), 197), 2642 PINMUX_IRQ(197), /* IRQ36 */
2643 PINMUX_IRQ(irq_pin(37), 198), 2643 PINMUX_IRQ(198), /* IRQ37 */
2644 PINMUX_IRQ(irq_pin(38), 199), 2644 PINMUX_IRQ(199), /* IRQ38 */
2645 PINMUX_IRQ(irq_pin(39), 200), 2645 PINMUX_IRQ(200), /* IRQ39 */
2646 PINMUX_IRQ(irq_pin(40), 66), 2646 PINMUX_IRQ(66), /* IRQ40 */
2647 PINMUX_IRQ(irq_pin(41), 102), 2647 PINMUX_IRQ(102), /* IRQ41 */
2648 PINMUX_IRQ(irq_pin(42), 103), 2648 PINMUX_IRQ(103), /* IRQ42 */
2649 PINMUX_IRQ(irq_pin(43), 109), 2649 PINMUX_IRQ(109), /* IRQ43 */
2650 PINMUX_IRQ(irq_pin(44), 110), 2650 PINMUX_IRQ(110), /* IRQ44 */
2651 PINMUX_IRQ(irq_pin(45), 111), 2651 PINMUX_IRQ(111), /* IRQ45 */
2652 PINMUX_IRQ(irq_pin(46), 112), 2652 PINMUX_IRQ(112), /* IRQ46 */
2653 PINMUX_IRQ(irq_pin(47), 113), 2653 PINMUX_IRQ(113), /* IRQ47 */
2654 PINMUX_IRQ(irq_pin(48), 114), 2654 PINMUX_IRQ(114), /* IRQ48 */
2655 PINMUX_IRQ(irq_pin(49), 115), 2655 PINMUX_IRQ(115), /* IRQ49 */
2656 PINMUX_IRQ(irq_pin(50), 301), 2656 PINMUX_IRQ(301), /* IRQ50 */
2657 PINMUX_IRQ(irq_pin(51), 290), 2657 PINMUX_IRQ(290), /* IRQ51 */
2658 PINMUX_IRQ(irq_pin(52), 296), 2658 PINMUX_IRQ(296), /* IRQ52 */
2659 PINMUX_IRQ(irq_pin(53), 325), 2659 PINMUX_IRQ(325), /* IRQ53 */
2660 PINMUX_IRQ(irq_pin(54), 326), 2660 PINMUX_IRQ(326), /* IRQ54 */
2661 PINMUX_IRQ(irq_pin(55), 327), 2661 PINMUX_IRQ(327), /* IRQ55 */
2662 PINMUX_IRQ(irq_pin(56), 328), 2662 PINMUX_IRQ(328), /* IRQ56 */
2663 PINMUX_IRQ(irq_pin(57), 329), 2663 PINMUX_IRQ(329), /* IRQ57 */
2664}; 2664};
2665 2665
2666#define PORTCR_PULMD_OFF (0 << 6) 2666#define PORTCR_PULMD_OFF (0 << 6)
@@ -2734,11 +2734,11 @@ const struct sh_pfc_soc_info r8a73a4_pinmux_info = {
2734 .functions = pinmux_functions, 2734 .functions = pinmux_functions,
2735 .nr_functions = ARRAY_SIZE(pinmux_functions), 2735 .nr_functions = ARRAY_SIZE(pinmux_functions),
2736 2736
2737 .cfg_regs = pinmux_config_regs, 2737 .cfg_regs = pinmux_config_regs,
2738 .data_regs = pinmux_data_regs, 2738 .data_regs = pinmux_data_regs,
2739 2739
2740 .gpio_data = pinmux_data, 2740 .pinmux_data = pinmux_data,
2741 .gpio_data_size = ARRAY_SIZE(pinmux_data), 2741 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
2742 2742
2743 .gpio_irq = pinmux_irqs, 2743 .gpio_irq = pinmux_irqs,
2744 .gpio_irq_size = ARRAY_SIZE(pinmux_irqs), 2744 .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 82ef1862dd1b..279e9dd442e4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -3651,38 +3651,38 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
3651}; 3651};
3652 3652
3653static const struct pinmux_irq pinmux_irqs[] = { 3653static const struct pinmux_irq pinmux_irqs[] = {
3654 PINMUX_IRQ(irq_pin(0), 2, 13), /* IRQ0A */ 3654 PINMUX_IRQ(2, 13), /* IRQ0A */
3655 PINMUX_IRQ(irq_pin(1), 20), /* IRQ1A */ 3655 PINMUX_IRQ(20), /* IRQ1A */
3656 PINMUX_IRQ(irq_pin(2), 11, 12), /* IRQ2A */ 3656 PINMUX_IRQ(11, 12), /* IRQ2A */
3657 PINMUX_IRQ(irq_pin(3), 10, 14), /* IRQ3A */ 3657 PINMUX_IRQ(10, 14), /* IRQ3A */
3658 PINMUX_IRQ(irq_pin(4), 15, 172), /* IRQ4A */ 3658 PINMUX_IRQ(15, 172), /* IRQ4A */
3659 PINMUX_IRQ(irq_pin(5), 0, 1), /* IRQ5A */ 3659 PINMUX_IRQ(0, 1), /* IRQ5A */
3660 PINMUX_IRQ(irq_pin(6), 121, 173), /* IRQ6A */ 3660 PINMUX_IRQ(121, 173), /* IRQ6A */
3661 PINMUX_IRQ(irq_pin(7), 120, 209), /* IRQ7A */ 3661 PINMUX_IRQ(120, 209), /* IRQ7A */
3662 PINMUX_IRQ(irq_pin(8), 119), /* IRQ8A */ 3662 PINMUX_IRQ(119), /* IRQ8A */
3663 PINMUX_IRQ(irq_pin(9), 118, 210), /* IRQ9A */ 3663 PINMUX_IRQ(118, 210), /* IRQ9A */
3664 PINMUX_IRQ(irq_pin(10), 19), /* IRQ10A */ 3664 PINMUX_IRQ(19), /* IRQ10A */
3665 PINMUX_IRQ(irq_pin(11), 104), /* IRQ11A */ 3665 PINMUX_IRQ(104), /* IRQ11A */
3666 PINMUX_IRQ(irq_pin(12), 42, 97), /* IRQ12A */ 3666 PINMUX_IRQ(42, 97), /* IRQ12A */
3667 PINMUX_IRQ(irq_pin(13), 64, 98), /* IRQ13A */ 3667 PINMUX_IRQ(64, 98), /* IRQ13A */
3668 PINMUX_IRQ(irq_pin(14), 63, 99), /* IRQ14A */ 3668 PINMUX_IRQ(63, 99), /* IRQ14A */
3669 PINMUX_IRQ(irq_pin(15), 62, 100), /* IRQ15A */ 3669 PINMUX_IRQ(62, 100), /* IRQ15A */
3670 PINMUX_IRQ(irq_pin(16), 68, 211), /* IRQ16A */ 3670 PINMUX_IRQ(68, 211), /* IRQ16A */
3671 PINMUX_IRQ(irq_pin(17), 69), /* IRQ17A */ 3671 PINMUX_IRQ(69), /* IRQ17A */
3672 PINMUX_IRQ(irq_pin(18), 70), /* IRQ18A */ 3672 PINMUX_IRQ(70), /* IRQ18A */
3673 PINMUX_IRQ(irq_pin(19), 71), /* IRQ19A */ 3673 PINMUX_IRQ(71), /* IRQ19A */
3674 PINMUX_IRQ(irq_pin(20), 67), /* IRQ20A */ 3674 PINMUX_IRQ(67), /* IRQ20A */
3675 PINMUX_IRQ(irq_pin(21), 202), /* IRQ21A */ 3675 PINMUX_IRQ(202), /* IRQ21A */
3676 PINMUX_IRQ(irq_pin(22), 95), /* IRQ22A */ 3676 PINMUX_IRQ(95), /* IRQ22A */
3677 PINMUX_IRQ(irq_pin(23), 96), /* IRQ23A */ 3677 PINMUX_IRQ(96), /* IRQ23A */
3678 PINMUX_IRQ(irq_pin(24), 180), /* IRQ24A */ 3678 PINMUX_IRQ(180), /* IRQ24A */
3679 PINMUX_IRQ(irq_pin(25), 38), /* IRQ25A */ 3679 PINMUX_IRQ(38), /* IRQ25A */
3680 PINMUX_IRQ(irq_pin(26), 58, 81), /* IRQ26A */ 3680 PINMUX_IRQ(58, 81), /* IRQ26A */
3681 PINMUX_IRQ(irq_pin(27), 57, 168), /* IRQ27A */ 3681 PINMUX_IRQ(57, 168), /* IRQ27A */
3682 PINMUX_IRQ(irq_pin(28), 56, 169), /* IRQ28A */ 3682 PINMUX_IRQ(56, 169), /* IRQ28A */
3683 PINMUX_IRQ(irq_pin(29), 50, 170), /* IRQ29A */ 3683 PINMUX_IRQ(50, 170), /* IRQ29A */
3684 PINMUX_IRQ(irq_pin(30), 49, 171), /* IRQ30A */ 3684 PINMUX_IRQ(49, 171), /* IRQ30A */
3685 PINMUX_IRQ(irq_pin(31), 41, 167), /* IRQ31A */ 3685 PINMUX_IRQ(41, 167), /* IRQ31A */
3686}; 3686};
3687 3687
3688#define PORTnCR_PULMD_OFF (0 << 6) 3688#define PORTnCR_PULMD_OFF (0 << 6)
@@ -3774,8 +3774,8 @@ const struct sh_pfc_soc_info r8a7740_pinmux_info = {
3774 .cfg_regs = pinmux_config_regs, 3774 .cfg_regs = pinmux_config_regs,
3775 .data_regs = pinmux_data_regs, 3775 .data_regs = pinmux_data_regs,
3776 3776
3777 .gpio_data = pinmux_data, 3777 .pinmux_data = pinmux_data,
3778 .gpio_data_size = ARRAY_SIZE(pinmux_data), 3778 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
3779 3779
3780 .gpio_irq = pinmux_irqs, 3780 .gpio_irq = pinmux_irqs,
3781 .gpio_irq_size = ARRAY_SIZE(pinmux_irqs), 3781 .gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index c7d610d1f3ef..bbd35dc1a0c4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2013 Renesas Solutions Corp. 4 * Copyright (C) 2013 Renesas Solutions Corp.
5 * Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 5 * Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 * Copyright (C) 2013 Cogent Embedded, Inc. 6 * Copyright (C) 2013 Cogent Embedded, Inc.
7 * Copyright (C) 2015 Ulrich Hecht
7 * 8 *
8 * based on 9 * based on
9 * Copyright (C) 2011 Renesas Solutions Corp. 10 * Copyright (C) 2011 Renesas Solutions Corp.
@@ -19,32 +20,37 @@
19 * GNU General Public License for more details. 20 * GNU General Public License for more details.
20 */ 21 */
21 22
22#include <linux/platform_data/gpio-rcar.h> 23#include <linux/io.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/pinctrl/pinconf-generic.h>
26#include "core.h"
24#include "sh_pfc.h" 27#include "sh_pfc.h"
25 28
26#define PORT_GP_27(bank, fn, sfx) \ 29#define PORT_GP_PUP_1(bank, pin, fn, sfx) \
27 PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \ 30 PORT_GP_CFG_1(bank, pin, fn, sfx, SH_PFC_PIN_CFG_PULL_UP)
28 PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \ 31
29 PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ 32#define PORT_GP_PUP_27(bank, fn, sfx) \
30 PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ 33 PORT_GP_PUP_1(bank, 0, fn, sfx), PORT_GP_PUP_1(bank, 1, fn, sfx), \
31 PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \ 34 PORT_GP_PUP_1(bank, 2, fn, sfx), PORT_GP_PUP_1(bank, 3, fn, sfx), \
32 PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \ 35 PORT_GP_PUP_1(bank, 4, fn, sfx), PORT_GP_PUP_1(bank, 5, fn, sfx), \
33 PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \ 36 PORT_GP_PUP_1(bank, 6, fn, sfx), PORT_GP_PUP_1(bank, 7, fn, sfx), \
34 PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \ 37 PORT_GP_PUP_1(bank, 8, fn, sfx), PORT_GP_PUP_1(bank, 9, fn, sfx), \
35 PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \ 38 PORT_GP_PUP_1(bank, 10, fn, sfx), PORT_GP_PUP_1(bank, 11, fn, sfx), \
36 PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \ 39 PORT_GP_PUP_1(bank, 12, fn, sfx), PORT_GP_PUP_1(bank, 13, fn, sfx), \
37 PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \ 40 PORT_GP_PUP_1(bank, 14, fn, sfx), PORT_GP_PUP_1(bank, 15, fn, sfx), \
38 PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \ 41 PORT_GP_PUP_1(bank, 16, fn, sfx), PORT_GP_PUP_1(bank, 17, fn, sfx), \
39 PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \ 42 PORT_GP_PUP_1(bank, 18, fn, sfx), PORT_GP_PUP_1(bank, 19, fn, sfx), \
40 PORT_GP_1(bank, 26, fn, sfx) 43 PORT_GP_PUP_1(bank, 20, fn, sfx), PORT_GP_PUP_1(bank, 21, fn, sfx), \
44 PORT_GP_PUP_1(bank, 22, fn, sfx), PORT_GP_PUP_1(bank, 23, fn, sfx), \
45 PORT_GP_PUP_1(bank, 24, fn, sfx), PORT_GP_PUP_1(bank, 25, fn, sfx), \
46 PORT_GP_PUP_1(bank, 26, fn, sfx)
41 47
42#define CPU_ALL_PORT(fn, sfx) \ 48#define CPU_ALL_PORT(fn, sfx) \
43 PORT_GP_32(0, fn, sfx), \ 49 PORT_GP_CFG_32(0, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
44 PORT_GP_32(1, fn, sfx), \ 50 PORT_GP_CFG_32(1, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
45 PORT_GP_32(2, fn, sfx), \ 51 PORT_GP_CFG_32(2, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
46 PORT_GP_32(3, fn, sfx), \ 52 PORT_GP_CFG_32(3, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \
47 PORT_GP_27(4, fn, sfx) 53 PORT_GP_PUP_27(4, fn, sfx)
48 54
49enum { 55enum {
50 PINMUX_RESERVED = 0, 56 PINMUX_RESERVED = 0,
@@ -2905,8 +2911,222 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2905 { }, 2911 { },
2906}; 2912};
2907 2913
2914#define PUPR0 0x100
2915#define PUPR1 0x104
2916#define PUPR2 0x108
2917#define PUPR3 0x10c
2918#define PUPR4 0x110
2919#define PUPR5 0x114
2920
2921static const struct {
2922 u16 reg : 11;
2923 u16 bit : 5;
2924} pullups[] = {
2925 [RCAR_GP_PIN(0, 6)] = { PUPR0, 0 }, /* A0 */
2926 [RCAR_GP_PIN(0, 7)] = { PUPR0, 1 }, /* A1 */
2927 [RCAR_GP_PIN(0, 8)] = { PUPR0, 2 }, /* A2 */
2928 [RCAR_GP_PIN(0, 9)] = { PUPR0, 3 }, /* A3 */
2929 [RCAR_GP_PIN(0, 10)] = { PUPR0, 4 }, /* A4 */
2930 [RCAR_GP_PIN(0, 11)] = { PUPR0, 5 }, /* A5 */
2931 [RCAR_GP_PIN(0, 12)] = { PUPR0, 6 }, /* A6 */
2932 [RCAR_GP_PIN(0, 13)] = { PUPR0, 7 }, /* A7 */
2933 [RCAR_GP_PIN(0, 14)] = { PUPR0, 8 }, /* A8 */
2934 [RCAR_GP_PIN(0, 15)] = { PUPR0, 9 }, /* A9 */
2935 [RCAR_GP_PIN(0, 16)] = { PUPR0, 10 }, /* A10 */
2936 [RCAR_GP_PIN(0, 17)] = { PUPR0, 11 }, /* A11 */
2937 [RCAR_GP_PIN(0, 18)] = { PUPR0, 12 }, /* A12 */
2938 [RCAR_GP_PIN(0, 19)] = { PUPR0, 13 }, /* A13 */
2939 [RCAR_GP_PIN(0, 20)] = { PUPR0, 14 }, /* A14 */
2940 [RCAR_GP_PIN(0, 21)] = { PUPR0, 15 }, /* A15 */
2941 [RCAR_GP_PIN(0, 22)] = { PUPR0, 16 }, /* A16 */
2942 [RCAR_GP_PIN(0, 23)] = { PUPR0, 17 }, /* A17 */
2943 [RCAR_GP_PIN(0, 24)] = { PUPR0, 18 }, /* A18 */
2944 [RCAR_GP_PIN(0, 25)] = { PUPR0, 19 }, /* A19 */
2945 [RCAR_GP_PIN(0, 26)] = { PUPR0, 20 }, /* A20 */
2946 [RCAR_GP_PIN(0, 27)] = { PUPR0, 21 }, /* A21 */
2947 [RCAR_GP_PIN(0, 28)] = { PUPR0, 22 }, /* A22 */
2948 [RCAR_GP_PIN(0, 29)] = { PUPR0, 23 }, /* A23 */
2949 [RCAR_GP_PIN(0, 30)] = { PUPR0, 24 }, /* A24 */
2950 [RCAR_GP_PIN(0, 31)] = { PUPR0, 25 }, /* A25 */
2951 [RCAR_GP_PIN(1, 3)] = { PUPR0, 26 }, /* /EX_CS0 */
2952 [RCAR_GP_PIN(1, 4)] = { PUPR0, 27 }, /* /EX_CS1 */
2953 [RCAR_GP_PIN(1, 5)] = { PUPR0, 28 }, /* /EX_CS2 */
2954 [RCAR_GP_PIN(1, 6)] = { PUPR0, 29 }, /* /EX_CS3 */
2955 [RCAR_GP_PIN(1, 7)] = { PUPR0, 30 }, /* /EX_CS4 */
2956 [RCAR_GP_PIN(1, 8)] = { PUPR0, 31 }, /* /EX_CS5 */
2957
2958 [RCAR_GP_PIN(0, 0)] = { PUPR1, 0 }, /* /PRESETOUT */
2959 [RCAR_GP_PIN(0, 5)] = { PUPR1, 1 }, /* /BS */
2960 [RCAR_GP_PIN(1, 0)] = { PUPR1, 2 }, /* RD//WR */
2961 [RCAR_GP_PIN(1, 1)] = { PUPR1, 3 }, /* /WE0 */
2962 [RCAR_GP_PIN(1, 2)] = { PUPR1, 4 }, /* /WE1 */
2963 [RCAR_GP_PIN(1, 11)] = { PUPR1, 5 }, /* EX_WAIT0 */
2964 [RCAR_GP_PIN(1, 9)] = { PUPR1, 6 }, /* DREQ0 */
2965 [RCAR_GP_PIN(1, 10)] = { PUPR1, 7 }, /* DACK0 */
2966 [RCAR_GP_PIN(1, 12)] = { PUPR1, 8 }, /* IRQ0 */
2967 [RCAR_GP_PIN(1, 13)] = { PUPR1, 9 }, /* IRQ1 */
2968
2969 [RCAR_GP_PIN(1, 22)] = { PUPR2, 0 }, /* DU0_DR0 */
2970 [RCAR_GP_PIN(1, 23)] = { PUPR2, 1 }, /* DU0_DR1 */
2971 [RCAR_GP_PIN(1, 24)] = { PUPR2, 2 }, /* DU0_DR2 */
2972 [RCAR_GP_PIN(1, 25)] = { PUPR2, 3 }, /* DU0_DR3 */
2973 [RCAR_GP_PIN(1, 26)] = { PUPR2, 4 }, /* DU0_DR4 */
2974 [RCAR_GP_PIN(1, 27)] = { PUPR2, 5 }, /* DU0_DR5 */
2975 [RCAR_GP_PIN(1, 28)] = { PUPR2, 6 }, /* DU0_DR6 */
2976 [RCAR_GP_PIN(1, 29)] = { PUPR2, 7 }, /* DU0_DR7 */
2977 [RCAR_GP_PIN(1, 30)] = { PUPR2, 8 }, /* DU0_DG0 */
2978 [RCAR_GP_PIN(1, 31)] = { PUPR2, 9 }, /* DU0_DG1 */
2979 [RCAR_GP_PIN(2, 0)] = { PUPR2, 10 }, /* DU0_DG2 */
2980 [RCAR_GP_PIN(2, 1)] = { PUPR2, 11 }, /* DU0_DG3 */
2981 [RCAR_GP_PIN(2, 2)] = { PUPR2, 12 }, /* DU0_DG4 */
2982 [RCAR_GP_PIN(2, 3)] = { PUPR2, 13 }, /* DU0_DG5 */
2983 [RCAR_GP_PIN(2, 4)] = { PUPR2, 14 }, /* DU0_DG6 */
2984 [RCAR_GP_PIN(2, 5)] = { PUPR2, 15 }, /* DU0_DG7 */
2985 [RCAR_GP_PIN(2, 6)] = { PUPR2, 16 }, /* DU0_DB0 */
2986 [RCAR_GP_PIN(2, 7)] = { PUPR2, 17 }, /* DU0_DB1 */
2987 [RCAR_GP_PIN(2, 8)] = { PUPR2, 18 }, /* DU0_DB2 */
2988 [RCAR_GP_PIN(2, 9)] = { PUPR2, 19 }, /* DU0_DB3 */
2989 [RCAR_GP_PIN(2, 10)] = { PUPR2, 20 }, /* DU0_DB4 */
2990 [RCAR_GP_PIN(2, 11)] = { PUPR2, 21 }, /* DU0_DB5 */
2991 [RCAR_GP_PIN(2, 12)] = { PUPR2, 22 }, /* DU0_DB6 */
2992 [RCAR_GP_PIN(2, 13)] = { PUPR2, 23 }, /* DU0_DB7 */
2993 [RCAR_GP_PIN(2, 14)] = { PUPR2, 24 }, /* DU0_DOTCLKIN */
2994 [RCAR_GP_PIN(2, 15)] = { PUPR2, 25 }, /* DU0_DOTCLKOUT0 */
2995 [RCAR_GP_PIN(2, 17)] = { PUPR2, 26 }, /* DU0_HSYNC */
2996 [RCAR_GP_PIN(2, 18)] = { PUPR2, 27 }, /* DU0_VSYNC */
2997 [RCAR_GP_PIN(2, 19)] = { PUPR2, 28 }, /* DU0_EXODDF */
2998 [RCAR_GP_PIN(2, 20)] = { PUPR2, 29 }, /* DU0_DISP */
2999 [RCAR_GP_PIN(2, 21)] = { PUPR2, 30 }, /* DU0_CDE */
3000 [RCAR_GP_PIN(2, 16)] = { PUPR2, 31 }, /* DU0_DOTCLKOUT1 */
3001
3002 [RCAR_GP_PIN(3, 24)] = { PUPR3, 0 }, /* VI0_CLK */
3003 [RCAR_GP_PIN(3, 25)] = { PUPR3, 1 }, /* VI0_CLKENB */
3004 [RCAR_GP_PIN(3, 26)] = { PUPR3, 2 }, /* VI0_FIELD */
3005 [RCAR_GP_PIN(3, 27)] = { PUPR3, 3 }, /* /VI0_HSYNC */
3006 [RCAR_GP_PIN(3, 28)] = { PUPR3, 4 }, /* /VI0_VSYNC */
3007 [RCAR_GP_PIN(3, 29)] = { PUPR3, 5 }, /* VI0_DATA0 */
3008 [RCAR_GP_PIN(3, 30)] = { PUPR3, 6 }, /* VI0_DATA1 */
3009 [RCAR_GP_PIN(3, 31)] = { PUPR3, 7 }, /* VI0_DATA2 */
3010 [RCAR_GP_PIN(4, 0)] = { PUPR3, 8 }, /* VI0_DATA3 */
3011 [RCAR_GP_PIN(4, 1)] = { PUPR3, 9 }, /* VI0_DATA4 */
3012 [RCAR_GP_PIN(4, 2)] = { PUPR3, 10 }, /* VI0_DATA5 */
3013 [RCAR_GP_PIN(4, 3)] = { PUPR3, 11 }, /* VI0_DATA6 */
3014 [RCAR_GP_PIN(4, 4)] = { PUPR3, 12 }, /* VI0_DATA7 */
3015 [RCAR_GP_PIN(4, 5)] = { PUPR3, 13 }, /* VI0_G2 */
3016 [RCAR_GP_PIN(4, 6)] = { PUPR3, 14 }, /* VI0_G3 */
3017 [RCAR_GP_PIN(4, 7)] = { PUPR3, 15 }, /* VI0_G4 */
3018 [RCAR_GP_PIN(4, 8)] = { PUPR3, 16 }, /* VI0_G5 */
3019 [RCAR_GP_PIN(4, 21)] = { PUPR3, 17 }, /* VI1_DATA12 */
3020 [RCAR_GP_PIN(4, 22)] = { PUPR3, 18 }, /* VI1_DATA13 */
3021 [RCAR_GP_PIN(4, 23)] = { PUPR3, 19 }, /* VI1_DATA14 */
3022 [RCAR_GP_PIN(4, 24)] = { PUPR3, 20 }, /* VI1_DATA15 */
3023 [RCAR_GP_PIN(4, 9)] = { PUPR3, 21 }, /* ETH_REF_CLK */
3024 [RCAR_GP_PIN(4, 10)] = { PUPR3, 22 }, /* ETH_TXD0 */
3025 [RCAR_GP_PIN(4, 11)] = { PUPR3, 23 }, /* ETH_TXD1 */
3026 [RCAR_GP_PIN(4, 12)] = { PUPR3, 24 }, /* ETH_CRS_DV */
3027 [RCAR_GP_PIN(4, 13)] = { PUPR3, 25 }, /* ETH_TX_EN */
3028 [RCAR_GP_PIN(4, 14)] = { PUPR3, 26 }, /* ETH_RX_ER */
3029 [RCAR_GP_PIN(4, 15)] = { PUPR3, 27 }, /* ETH_RXD0 */
3030 [RCAR_GP_PIN(4, 16)] = { PUPR3, 28 }, /* ETH_RXD1 */
3031 [RCAR_GP_PIN(4, 17)] = { PUPR3, 29 }, /* ETH_MDC */
3032 [RCAR_GP_PIN(4, 18)] = { PUPR3, 30 }, /* ETH_MDIO */
3033 [RCAR_GP_PIN(4, 19)] = { PUPR3, 31 }, /* ETH_LINK */
3034
3035 [RCAR_GP_PIN(3, 6)] = { PUPR4, 0 }, /* SSI_SCK012 */
3036 [RCAR_GP_PIN(3, 7)] = { PUPR4, 1 }, /* SSI_WS012 */
3037 [RCAR_GP_PIN(3, 10)] = { PUPR4, 2 }, /* SSI_SDATA0 */
3038 [RCAR_GP_PIN(3, 9)] = { PUPR4, 3 }, /* SSI_SDATA1 */
3039 [RCAR_GP_PIN(3, 8)] = { PUPR4, 4 }, /* SSI_SDATA2 */
3040 [RCAR_GP_PIN(3, 2)] = { PUPR4, 5 }, /* SSI_SCK34 */
3041 [RCAR_GP_PIN(3, 3)] = { PUPR4, 6 }, /* SSI_WS34 */
3042 [RCAR_GP_PIN(3, 5)] = { PUPR4, 7 }, /* SSI_SDATA3 */
3043 [RCAR_GP_PIN(3, 4)] = { PUPR4, 8 }, /* SSI_SDATA4 */
3044 [RCAR_GP_PIN(2, 31)] = { PUPR4, 9 }, /* SSI_SCK5 */
3045 [RCAR_GP_PIN(3, 0)] = { PUPR4, 10 }, /* SSI_WS5 */
3046 [RCAR_GP_PIN(3, 1)] = { PUPR4, 11 }, /* SSI_SDATA5 */
3047 [RCAR_GP_PIN(2, 28)] = { PUPR4, 12 }, /* SSI_SCK6 */
3048 [RCAR_GP_PIN(2, 29)] = { PUPR4, 13 }, /* SSI_WS6 */
3049 [RCAR_GP_PIN(2, 30)] = { PUPR4, 14 }, /* SSI_SDATA6 */
3050 [RCAR_GP_PIN(2, 24)] = { PUPR4, 15 }, /* SSI_SCK78 */
3051 [RCAR_GP_PIN(2, 25)] = { PUPR4, 16 }, /* SSI_WS78 */
3052 [RCAR_GP_PIN(2, 27)] = { PUPR4, 17 }, /* SSI_SDATA7 */
3053 [RCAR_GP_PIN(2, 26)] = { PUPR4, 18 }, /* SSI_SDATA8 */
3054 [RCAR_GP_PIN(3, 23)] = { PUPR4, 19 }, /* TCLK0 */
3055 [RCAR_GP_PIN(3, 11)] = { PUPR4, 20 }, /* SD0_CLK */
3056 [RCAR_GP_PIN(3, 12)] = { PUPR4, 21 }, /* SD0_CMD */
3057 [RCAR_GP_PIN(3, 13)] = { PUPR4, 22 }, /* SD0_DAT0 */
3058 [RCAR_GP_PIN(3, 14)] = { PUPR4, 23 }, /* SD0_DAT1 */
3059 [RCAR_GP_PIN(3, 15)] = { PUPR4, 24 }, /* SD0_DAT2 */
3060 [RCAR_GP_PIN(3, 16)] = { PUPR4, 25 }, /* SD0_DAT3 */
3061 [RCAR_GP_PIN(3, 17)] = { PUPR4, 26 }, /* SD0_CD */
3062 [RCAR_GP_PIN(3, 18)] = { PUPR4, 27 }, /* SD0_WP */
3063 [RCAR_GP_PIN(2, 22)] = { PUPR4, 28 }, /* AUDIO_CLKA */
3064 [RCAR_GP_PIN(2, 23)] = { PUPR4, 29 }, /* AUDIO_CLKB */
3065 [RCAR_GP_PIN(1, 14)] = { PUPR4, 30 }, /* IRQ2 */
3066 [RCAR_GP_PIN(1, 15)] = { PUPR4, 31 }, /* IRQ3 */
3067
3068 [RCAR_GP_PIN(0, 1)] = { PUPR5, 0 }, /* PENC0 */
3069 [RCAR_GP_PIN(0, 2)] = { PUPR5, 1 }, /* PENC1 */
3070 [RCAR_GP_PIN(0, 3)] = { PUPR5, 2 }, /* USB_OVC0 */
3071 [RCAR_GP_PIN(0, 4)] = { PUPR5, 3 }, /* USB_OVC1 */
3072 [RCAR_GP_PIN(1, 16)] = { PUPR5, 4 }, /* SCIF_CLK */
3073 [RCAR_GP_PIN(1, 17)] = { PUPR5, 5 }, /* TX0 */
3074 [RCAR_GP_PIN(1, 18)] = { PUPR5, 6 }, /* RX0 */
3075 [RCAR_GP_PIN(1, 19)] = { PUPR5, 7 }, /* SCK0 */
3076 [RCAR_GP_PIN(1, 20)] = { PUPR5, 8 }, /* /CTS0 */
3077 [RCAR_GP_PIN(1, 21)] = { PUPR5, 9 }, /* /RTS0 */
3078 [RCAR_GP_PIN(3, 19)] = { PUPR5, 10 }, /* HSPI_CLK0 */
3079 [RCAR_GP_PIN(3, 20)] = { PUPR5, 11 }, /* /HSPI_CS0 */
3080 [RCAR_GP_PIN(3, 21)] = { PUPR5, 12 }, /* HSPI_RX0 */
3081 [RCAR_GP_PIN(3, 22)] = { PUPR5, 13 }, /* HSPI_TX0 */
3082 [RCAR_GP_PIN(4, 20)] = { PUPR5, 14 }, /* ETH_MAGIC */
3083 [RCAR_GP_PIN(4, 25)] = { PUPR5, 15 }, /* AVS1 */
3084 [RCAR_GP_PIN(4, 26)] = { PUPR5, 16 }, /* AVS2 */
3085};
3086
3087static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
3088 unsigned int pin)
3089{
3090 void __iomem *addr;
3091
3092 if (WARN_ON_ONCE(!pullups[pin].reg))
3093 return PIN_CONFIG_BIAS_DISABLE;
3094
3095 addr = pfc->windows->virt + pullups[pin].reg;
3096
3097 if (ioread32(addr) & BIT(pullups[pin].bit))
3098 return PIN_CONFIG_BIAS_PULL_UP;
3099 else
3100 return PIN_CONFIG_BIAS_DISABLE;
3101}
3102
3103static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
3104 unsigned int bias)
3105{
3106 void __iomem *addr;
3107 u32 value;
3108 u32 bit;
3109
3110 if (WARN_ON_ONCE(!pullups[pin].reg))
3111 return;
3112
3113 addr = pfc->windows->virt + pullups[pin].reg;
3114 bit = BIT(pullups[pin].bit);
3115
3116 value = ioread32(addr) & ~bit;
3117 if (bias == PIN_CONFIG_BIAS_PULL_UP)
3118 value |= bit;
3119 iowrite32(value, addr);
3120}
3121
3122static const struct sh_pfc_soc_operations r8a7778_pfc_ops = {
3123 .get_bias = r8a7778_pinmux_get_bias,
3124 .set_bias = r8a7778_pinmux_set_bias,
3125};
3126
2908const struct sh_pfc_soc_info r8a7778_pinmux_info = { 3127const struct sh_pfc_soc_info r8a7778_pinmux_info = {
2909 .name = "r8a7778_pfc", 3128 .name = "r8a7778_pfc",
3129 .ops = &r8a7778_pfc_ops,
2910 3130
2911 .unlock_reg = 0xfffc0000, /* PMMR */ 3131 .unlock_reg = 0xfffc0000, /* PMMR */
2912 3132
@@ -2923,6 +3143,6 @@ const struct sh_pfc_soc_info r8a7778_pinmux_info = {
2923 3143
2924 .cfg_regs = pinmux_config_regs, 3144 .cfg_regs = pinmux_config_regs,
2925 3145
2926 .gpio_data = pinmux_data, 3146 .pinmux_data = pinmux_data,
2927 .gpio_data_size = ARRAY_SIZE(pinmux_data), 3147 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
2928}; 3148};
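A minimal, self-contained sketch of the pull-up lookup scheme the r8a7778 hunk above introduces: every GPIO pin index maps to a (PUPR register offset, bit) pair packed into a u16, and get/set bias is a single read-modify-write of that bit. This is user-space illustration only, assuming nothing beyond what the patch shows: the demo_* names are invented for the example, a plain array stands in for the ioremapped PFC window that the driver touches with ioread32()/iowrite32(), and the two table entries are copied from the patch.

/*
 * Illustration of the pin -> (PUPRn offset, bit) lookup added for r8a7778.
 * Not driver code: demo_* names are made up, and a flat array replaces MMIO.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_GP_PIN(bank, pin)	(((bank) * 32) + (pin))	/* mirrors RCAR_GP_PIN() */

#define PUPR0	0x100
#define PUPR3	0x10c

static const struct {
	uint16_t reg : 11;	/* offset of the PUPRn register */
	uint16_t bit : 5;	/* bit within that register */
} demo_pullups[32 * 5] = {
	[DEMO_GP_PIN(0, 6)] = { PUPR0, 0 },	/* A0, as in the patch */
	[DEMO_GP_PIN(4, 9)] = { PUPR3, 21 },	/* ETH_REF_CLK, as in the patch */
};

/* Fake register file standing in for the ioremapped PFC window. */
static uint32_t demo_regs[0x200 / 4];

static int demo_get_bias(unsigned int pin)
{
	if (!demo_pullups[pin].reg)
		return 0;	/* no pull-up control for this pin */

	return !!(demo_regs[demo_pullups[pin].reg / 4] &
		  (1u << demo_pullups[pin].bit));
}

static void demo_set_bias(unsigned int pin, int pull_up)
{
	uint32_t *reg = &demo_regs[demo_pullups[pin].reg / 4];
	uint32_t bit = 1u << demo_pullups[pin].bit;

	if (!demo_pullups[pin].reg)
		return;

	/* Clear the bit, then set it again only if a pull-up is requested. */
	*reg &= ~bit;
	if (pull_up)
		*reg |= bit;
}

int main(void)
{
	demo_set_bias(DEMO_GP_PIN(4, 9), 1);
	printf("ETH_REF_CLK pull-up: %d\n", demo_get_bias(DEMO_GP_PIN(4, 9)));
	printf("A0 pull-up:          %d\n", demo_get_bias(DEMO_GP_PIN(0, 6)));
	return 0;
}

The design point the table makes is that a pin with no pull-up control simply has an all-zero entry, which is why both the driver's get_bias/set_bias and this sketch treat a zero register offset as "not wired up".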
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index f5c01e1e2615..ed4e0788035c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/platform_data/gpio-rcar.h>
24 23
25#include "sh_pfc.h" 24#include "sh_pfc.h"
26 25
@@ -620,18 +619,18 @@ static const u16 pinmux_data[] = {
620 PINMUX_DATA(USB_PENC1_MARK, FN_USB_PENC1), 619 PINMUX_DATA(USB_PENC1_MARK, FN_USB_PENC1),
621 620
622 PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2), 621 PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2),
623 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCK0, SEL_SCIF0_0), 622 PINMUX_IPSR_MSEL(IP0_2_0, SCK0, SEL_SCIF0_0),
624 PINMUX_IPSR_DATA(IP0_2_0, PWM1), 623 PINMUX_IPSR_DATA(IP0_2_0, PWM1),
625 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, PWMFSW0, SEL_PWMFSW_0), 624 PINMUX_IPSR_MSEL(IP0_2_0, PWMFSW0, SEL_PWMFSW_0),
626 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, SCIF_CLK, SEL_SCIF_0), 625 PINMUX_IPSR_MSEL(IP0_2_0, SCIF_CLK, SEL_SCIF_0),
627 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, TCLK0_C, SEL_TMU0_2), 626 PINMUX_IPSR_MSEL(IP0_2_0, TCLK0_C, SEL_TMU0_2),
628 PINMUX_IPSR_DATA(IP0_5_3, BS), 627 PINMUX_IPSR_DATA(IP0_5_3, BS),
629 PINMUX_IPSR_DATA(IP0_5_3, SD1_DAT2), 628 PINMUX_IPSR_DATA(IP0_5_3, SD1_DAT2),
630 PINMUX_IPSR_DATA(IP0_5_3, MMC0_D2), 629 PINMUX_IPSR_DATA(IP0_5_3, MMC0_D2),
631 PINMUX_IPSR_DATA(IP0_5_3, FD2), 630 PINMUX_IPSR_DATA(IP0_5_3, FD2),
632 PINMUX_IPSR_DATA(IP0_5_3, ATADIR0), 631 PINMUX_IPSR_DATA(IP0_5_3, ATADIR0),
633 PINMUX_IPSR_DATA(IP0_5_3, SDSELF), 632 PINMUX_IPSR_DATA(IP0_5_3, SDSELF),
634 PINMUX_IPSR_MODSEL_DATA(IP0_5_3, HCTS1, SEL_HSCIF1_0), 633 PINMUX_IPSR_MSEL(IP0_5_3, HCTS1, SEL_HSCIF1_0),
635 PINMUX_IPSR_DATA(IP0_5_3, TX4_C), 634 PINMUX_IPSR_DATA(IP0_5_3, TX4_C),
636 PINMUX_IPSR_DATA(IP0_7_6, A0), 635 PINMUX_IPSR_DATA(IP0_7_6, A0),
637 PINMUX_IPSR_DATA(IP0_7_6, SD1_DAT3), 636 PINMUX_IPSR_DATA(IP0_7_6, SD1_DAT3),
@@ -641,37 +640,37 @@ static const u16 pinmux_data[] = {
641 PINMUX_IPSR_DATA(IP0_9_8, TX5_D), 640 PINMUX_IPSR_DATA(IP0_9_8, TX5_D),
642 PINMUX_IPSR_DATA(IP0_9_8, HSPI_TX2_B), 641 PINMUX_IPSR_DATA(IP0_9_8, HSPI_TX2_B),
643 PINMUX_IPSR_DATA(IP0_11_10, A21), 642 PINMUX_IPSR_DATA(IP0_11_10, A21),
644 PINMUX_IPSR_MODSEL_DATA(IP0_11_10, SCK5_D, SEL_SCIF5_3), 643 PINMUX_IPSR_MSEL(IP0_11_10, SCK5_D, SEL_SCIF5_3),
645 PINMUX_IPSR_MODSEL_DATA(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1), 644 PINMUX_IPSR_MSEL(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1),
646 PINMUX_IPSR_DATA(IP0_13_12, A22), 645 PINMUX_IPSR_DATA(IP0_13_12, A22),
647 PINMUX_IPSR_MODSEL_DATA(IP0_13_12, RX5_D, SEL_SCIF5_3), 646 PINMUX_IPSR_MSEL(IP0_13_12, RX5_D, SEL_SCIF5_3),
648 PINMUX_IPSR_MODSEL_DATA(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1), 647 PINMUX_IPSR_MSEL(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1),
649 PINMUX_IPSR_DATA(IP0_13_12, VI1_R0), 648 PINMUX_IPSR_DATA(IP0_13_12, VI1_R0),
650 PINMUX_IPSR_DATA(IP0_15_14, A23), 649 PINMUX_IPSR_DATA(IP0_15_14, A23),
651 PINMUX_IPSR_DATA(IP0_15_14, FCLE), 650 PINMUX_IPSR_DATA(IP0_15_14, FCLE),
652 PINMUX_IPSR_MODSEL_DATA(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0), 651 PINMUX_IPSR_MSEL(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0),
653 PINMUX_IPSR_DATA(IP0_15_14, VI1_R1), 652 PINMUX_IPSR_DATA(IP0_15_14, VI1_R1),
654 PINMUX_IPSR_DATA(IP0_18_16, A24), 653 PINMUX_IPSR_DATA(IP0_18_16, A24),
655 PINMUX_IPSR_DATA(IP0_18_16, SD1_CD), 654 PINMUX_IPSR_DATA(IP0_18_16, SD1_CD),
656 PINMUX_IPSR_DATA(IP0_18_16, MMC0_D4), 655 PINMUX_IPSR_DATA(IP0_18_16, MMC0_D4),
657 PINMUX_IPSR_DATA(IP0_18_16, FD4), 656 PINMUX_IPSR_DATA(IP0_18_16, FD4),
658 PINMUX_IPSR_MODSEL_DATA(IP0_18_16, HSPI_CS2, SEL_HSPI2_0), 657 PINMUX_IPSR_MSEL(IP0_18_16, HSPI_CS2, SEL_HSPI2_0),
659 PINMUX_IPSR_DATA(IP0_18_16, VI1_R2), 658 PINMUX_IPSR_DATA(IP0_18_16, VI1_R2),
660 PINMUX_IPSR_MODSEL_DATA(IP0_18_16, SSI_WS78_B, SEL_SSI7_1), 659 PINMUX_IPSR_MSEL(IP0_18_16, SSI_WS78_B, SEL_SSI7_1),
661 PINMUX_IPSR_DATA(IP0_22_19, A25), 660 PINMUX_IPSR_DATA(IP0_22_19, A25),
662 PINMUX_IPSR_DATA(IP0_22_19, SD1_WP), 661 PINMUX_IPSR_DATA(IP0_22_19, SD1_WP),
663 PINMUX_IPSR_DATA(IP0_22_19, MMC0_D5), 662 PINMUX_IPSR_DATA(IP0_22_19, MMC0_D5),
664 PINMUX_IPSR_DATA(IP0_22_19, FD5), 663 PINMUX_IPSR_DATA(IP0_22_19, FD5),
665 PINMUX_IPSR_MODSEL_DATA(IP0_22_19, HSPI_RX2, SEL_HSPI2_0), 664 PINMUX_IPSR_MSEL(IP0_22_19, HSPI_RX2, SEL_HSPI2_0),
666 PINMUX_IPSR_DATA(IP0_22_19, VI1_R3), 665 PINMUX_IPSR_DATA(IP0_22_19, VI1_R3),
667 PINMUX_IPSR_DATA(IP0_22_19, TX5_B), 666 PINMUX_IPSR_DATA(IP0_22_19, TX5_B),
668 PINMUX_IPSR_MODSEL_DATA(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1), 667 PINMUX_IPSR_MSEL(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1),
669 PINMUX_IPSR_MODSEL_DATA(IP0_22_19, CTS0_B, SEL_SCIF0_1), 668 PINMUX_IPSR_MSEL(IP0_22_19, CTS0_B, SEL_SCIF0_1),
670 PINMUX_IPSR_DATA(IP0_24_23, CLKOUT), 669 PINMUX_IPSR_DATA(IP0_24_23, CLKOUT),
671 PINMUX_IPSR_DATA(IP0_24_23, TX3C_IRDA_TX_C), 670 PINMUX_IPSR_DATA(IP0_24_23, TX3C_IRDA_TX_C),
672 PINMUX_IPSR_DATA(IP0_24_23, PWM0_B), 671 PINMUX_IPSR_DATA(IP0_24_23, PWM0_B),
673 PINMUX_IPSR_DATA(IP0_25, CS0), 672 PINMUX_IPSR_DATA(IP0_25, CS0),
674 PINMUX_IPSR_MODSEL_DATA(IP0_25, HSPI_CS2_B, SEL_HSPI2_1), 673 PINMUX_IPSR_MSEL(IP0_25, HSPI_CS2_B, SEL_HSPI2_1),
675 PINMUX_IPSR_DATA(IP0_27_26, CS1_A26), 674 PINMUX_IPSR_DATA(IP0_27_26, CS1_A26),
676 PINMUX_IPSR_DATA(IP0_27_26, HSPI_TX2), 675 PINMUX_IPSR_DATA(IP0_27_26, HSPI_TX2),
677 PINMUX_IPSR_DATA(IP0_27_26, SDSELF_B), 676 PINMUX_IPSR_DATA(IP0_27_26, SDSELF_B),
@@ -679,11 +678,11 @@ static const u16 pinmux_data[] = {
679 PINMUX_IPSR_DATA(IP0_30_28, FWE), 678 PINMUX_IPSR_DATA(IP0_30_28, FWE),
680 PINMUX_IPSR_DATA(IP0_30_28, ATAG0), 679 PINMUX_IPSR_DATA(IP0_30_28, ATAG0),
681 PINMUX_IPSR_DATA(IP0_30_28, VI1_R7), 680 PINMUX_IPSR_DATA(IP0_30_28, VI1_R7),
682 PINMUX_IPSR_MODSEL_DATA(IP0_30_28, HRTS1, SEL_HSCIF1_0), 681 PINMUX_IPSR_MSEL(IP0_30_28, HRTS1, SEL_HSCIF1_0),
683 PINMUX_IPSR_MODSEL_DATA(IP0_30_28, RX4_C, SEL_SCIF4_2), 682 PINMUX_IPSR_MSEL(IP0_30_28, RX4_C, SEL_SCIF4_2),
684 683
685 PINMUX_IPSR_DATA(IP1_1_0, EX_CS0), 684 PINMUX_IPSR_DATA(IP1_1_0, EX_CS0),
686 PINMUX_IPSR_MODSEL_DATA(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2), 685 PINMUX_IPSR_MSEL(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2),
687 PINMUX_IPSR_DATA(IP1_1_0, MMC0_D6), 686 PINMUX_IPSR_DATA(IP1_1_0, MMC0_D6),
688 PINMUX_IPSR_DATA(IP1_1_0, FD6), 687 PINMUX_IPSR_DATA(IP1_1_0, FD6),
689 PINMUX_IPSR_DATA(IP1_3_2, EX_CS1), 688 PINMUX_IPSR_DATA(IP1_3_2, EX_CS1),
@@ -700,45 +699,45 @@ static const u16 pinmux_data[] = {
700 PINMUX_IPSR_DATA(IP1_10_7, FRE), 699 PINMUX_IPSR_DATA(IP1_10_7, FRE),
701 PINMUX_IPSR_DATA(IP1_10_7, ATACS10), 700 PINMUX_IPSR_DATA(IP1_10_7, ATACS10),
702 PINMUX_IPSR_DATA(IP1_10_7, VI1_R4), 701 PINMUX_IPSR_DATA(IP1_10_7, VI1_R4),
703 PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RX5_B, SEL_SCIF5_1), 702 PINMUX_IPSR_MSEL(IP1_10_7, RX5_B, SEL_SCIF5_1),
704 PINMUX_IPSR_MODSEL_DATA(IP1_10_7, HSCK1, SEL_HSCIF1_0), 703 PINMUX_IPSR_MSEL(IP1_10_7, HSCK1, SEL_HSCIF1_0),
705 PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1), 704 PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1),
706 PINMUX_IPSR_MODSEL_DATA(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1), 705 PINMUX_IPSR_MSEL(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1),
707 PINMUX_IPSR_MODSEL_DATA(IP1_10_7, SSI_SDATA9, SEL_SSI9_0), 706 PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA9, SEL_SSI9_0),
708 PINMUX_IPSR_DATA(IP1_14_11, EX_CS4), 707 PINMUX_IPSR_DATA(IP1_14_11, EX_CS4),
709 PINMUX_IPSR_DATA(IP1_14_11, SD1_DAT0), 708 PINMUX_IPSR_DATA(IP1_14_11, SD1_DAT0),
710 PINMUX_IPSR_DATA(IP1_14_11, MMC0_D0), 709 PINMUX_IPSR_DATA(IP1_14_11, MMC0_D0),
711 PINMUX_IPSR_DATA(IP1_14_11, FD0), 710 PINMUX_IPSR_DATA(IP1_14_11, FD0),
712 PINMUX_IPSR_DATA(IP1_14_11, ATARD0), 711 PINMUX_IPSR_DATA(IP1_14_11, ATARD0),
713 PINMUX_IPSR_DATA(IP1_14_11, VI1_R5), 712 PINMUX_IPSR_DATA(IP1_14_11, VI1_R5),
714 PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SCK5_B, SEL_SCIF5_1), 713 PINMUX_IPSR_MSEL(IP1_14_11, SCK5_B, SEL_SCIF5_1),
715 PINMUX_IPSR_DATA(IP1_14_11, HTX1), 714 PINMUX_IPSR_DATA(IP1_14_11, HTX1),
716 PINMUX_IPSR_DATA(IP1_14_11, TX2_E), 715 PINMUX_IPSR_DATA(IP1_14_11, TX2_E),
717 PINMUX_IPSR_DATA(IP1_14_11, TX0_B), 716 PINMUX_IPSR_DATA(IP1_14_11, TX0_B),
718 PINMUX_IPSR_MODSEL_DATA(IP1_14_11, SSI_SCK9, SEL_SSI9_0), 717 PINMUX_IPSR_MSEL(IP1_14_11, SSI_SCK9, SEL_SSI9_0),
719 PINMUX_IPSR_DATA(IP1_18_15, EX_CS5), 718 PINMUX_IPSR_DATA(IP1_18_15, EX_CS5),
720 PINMUX_IPSR_DATA(IP1_18_15, SD1_DAT1), 719 PINMUX_IPSR_DATA(IP1_18_15, SD1_DAT1),
721 PINMUX_IPSR_DATA(IP1_18_15, MMC0_D1), 720 PINMUX_IPSR_DATA(IP1_18_15, MMC0_D1),
722 PINMUX_IPSR_DATA(IP1_18_15, FD1), 721 PINMUX_IPSR_DATA(IP1_18_15, FD1),
723 PINMUX_IPSR_DATA(IP1_18_15, ATAWR0), 722 PINMUX_IPSR_DATA(IP1_18_15, ATAWR0),
724 PINMUX_IPSR_DATA(IP1_18_15, VI1_R6), 723 PINMUX_IPSR_DATA(IP1_18_15, VI1_R6),
725 PINMUX_IPSR_MODSEL_DATA(IP1_18_15, HRX1, SEL_HSCIF1_0), 724 PINMUX_IPSR_MSEL(IP1_18_15, HRX1, SEL_HSCIF1_0),
726 PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX2_E, SEL_SCIF2_4), 725 PINMUX_IPSR_MSEL(IP1_18_15, RX2_E, SEL_SCIF2_4),
727 PINMUX_IPSR_MODSEL_DATA(IP1_18_15, RX0_B, SEL_SCIF0_1), 726 PINMUX_IPSR_MSEL(IP1_18_15, RX0_B, SEL_SCIF0_1),
728 PINMUX_IPSR_MODSEL_DATA(IP1_18_15, SSI_WS9, SEL_SSI9_0), 727 PINMUX_IPSR_MSEL(IP1_18_15, SSI_WS9, SEL_SSI9_0),
729 PINMUX_IPSR_DATA(IP1_20_19, MLB_CLK), 728 PINMUX_IPSR_DATA(IP1_20_19, MLB_CLK),
730 PINMUX_IPSR_DATA(IP1_20_19, PWM2), 729 PINMUX_IPSR_DATA(IP1_20_19, PWM2),
731 PINMUX_IPSR_MODSEL_DATA(IP1_20_19, SCK4, SEL_SCIF4_0), 730 PINMUX_IPSR_MSEL(IP1_20_19, SCK4, SEL_SCIF4_0),
732 PINMUX_IPSR_DATA(IP1_22_21, MLB_SIG), 731 PINMUX_IPSR_DATA(IP1_22_21, MLB_SIG),
733 PINMUX_IPSR_DATA(IP1_22_21, PWM3), 732 PINMUX_IPSR_DATA(IP1_22_21, PWM3),
734 PINMUX_IPSR_DATA(IP1_22_21, TX4), 733 PINMUX_IPSR_DATA(IP1_22_21, TX4),
735 PINMUX_IPSR_DATA(IP1_24_23, MLB_DAT), 734 PINMUX_IPSR_DATA(IP1_24_23, MLB_DAT),
736 PINMUX_IPSR_DATA(IP1_24_23, PWM4), 735 PINMUX_IPSR_DATA(IP1_24_23, PWM4),
737 PINMUX_IPSR_MODSEL_DATA(IP1_24_23, RX4, SEL_SCIF4_0), 736 PINMUX_IPSR_MSEL(IP1_24_23, RX4, SEL_SCIF4_0),
738 PINMUX_IPSR_DATA(IP1_28_25, HTX0), 737 PINMUX_IPSR_DATA(IP1_28_25, HTX0),
739 PINMUX_IPSR_DATA(IP1_28_25, TX1), 738 PINMUX_IPSR_DATA(IP1_28_25, TX1),
740 PINMUX_IPSR_DATA(IP1_28_25, SDATA), 739 PINMUX_IPSR_DATA(IP1_28_25, SDATA),
741 PINMUX_IPSR_MODSEL_DATA(IP1_28_25, CTS0_C, SEL_SCIF0_2), 740 PINMUX_IPSR_MSEL(IP1_28_25, CTS0_C, SEL_SCIF0_2),
742 PINMUX_IPSR_DATA(IP1_28_25, SUB_TCK), 741 PINMUX_IPSR_DATA(IP1_28_25, SUB_TCK),
743 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE2), 742 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE2),
744 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE10), 743 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE10),
@@ -746,39 +745,39 @@ static const u16 pinmux_data[] = {
746 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE26), 745 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE26),
747 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE34), 746 PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE34),
748 747
749 PINMUX_IPSR_MODSEL_DATA(IP2_3_0, HRX0, SEL_HSCIF0_0), 748 PINMUX_IPSR_MSEL(IP2_3_0, HRX0, SEL_HSCIF0_0),
750 PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RX1, SEL_SCIF1_0), 749 PINMUX_IPSR_MSEL(IP2_3_0, RX1, SEL_SCIF1_0),
751 PINMUX_IPSR_DATA(IP2_3_0, SCKZ), 750 PINMUX_IPSR_DATA(IP2_3_0, SCKZ),
752 PINMUX_IPSR_MODSEL_DATA(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2), 751 PINMUX_IPSR_MSEL(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2),
753 PINMUX_IPSR_DATA(IP2_3_0, SUB_TDI), 752 PINMUX_IPSR_DATA(IP2_3_0, SUB_TDI),
754 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE3), 753 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE3),
755 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE11), 754 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE11),
756 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE19), 755 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE19),
757 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE27), 756 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE27),
758 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE35), 757 PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE35),
759 PINMUX_IPSR_MODSEL_DATA(IP2_7_4, HSCK0, SEL_HSCIF0_0), 758 PINMUX_IPSR_MSEL(IP2_7_4, HSCK0, SEL_HSCIF0_0),
760 PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK1, SEL_SCIF1_0), 759 PINMUX_IPSR_MSEL(IP2_7_4, SCK1, SEL_SCIF1_0),
761 PINMUX_IPSR_DATA(IP2_7_4, MTS), 760 PINMUX_IPSR_DATA(IP2_7_4, MTS),
762 PINMUX_IPSR_DATA(IP2_7_4, PWM5), 761 PINMUX_IPSR_DATA(IP2_7_4, PWM5),
763 PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SCK0_C, SEL_SCIF0_2), 762 PINMUX_IPSR_MSEL(IP2_7_4, SCK0_C, SEL_SCIF0_2),
764 PINMUX_IPSR_MODSEL_DATA(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1), 763 PINMUX_IPSR_MSEL(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1),
765 PINMUX_IPSR_DATA(IP2_7_4, SUB_TDO), 764 PINMUX_IPSR_DATA(IP2_7_4, SUB_TDO),
766 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE0), 765 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE0),
767 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE8), 766 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE8),
768 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE16), 767 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE16),
769 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE24), 768 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE24),
770 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE32), 769 PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE32),
771 PINMUX_IPSR_MODSEL_DATA(IP2_11_8, HCTS0, SEL_HSCIF0_0), 770 PINMUX_IPSR_MSEL(IP2_11_8, HCTS0, SEL_HSCIF0_0),
772 PINMUX_IPSR_MODSEL_DATA(IP2_11_8, CTS1, SEL_SCIF1_0), 771 PINMUX_IPSR_MSEL(IP2_11_8, CTS1, SEL_SCIF1_0),
773 PINMUX_IPSR_DATA(IP2_11_8, STM), 772 PINMUX_IPSR_DATA(IP2_11_8, STM),
774 PINMUX_IPSR_DATA(IP2_11_8, PWM0_D), 773 PINMUX_IPSR_DATA(IP2_11_8, PWM0_D),
775 PINMUX_IPSR_MODSEL_DATA(IP2_11_8, RX0_C, SEL_SCIF0_2), 774 PINMUX_IPSR_MSEL(IP2_11_8, RX0_C, SEL_SCIF0_2),
776 PINMUX_IPSR_MODSEL_DATA(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2), 775 PINMUX_IPSR_MSEL(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2),
777 PINMUX_IPSR_DATA(IP2_11_8, SUB_TRST), 776 PINMUX_IPSR_DATA(IP2_11_8, SUB_TRST),
778 PINMUX_IPSR_MODSEL_DATA(IP2_11_8, TCLK1_B, SEL_TMU1_1), 777 PINMUX_IPSR_MSEL(IP2_11_8, TCLK1_B, SEL_TMU1_1),
779 PINMUX_IPSR_DATA(IP2_11_8, CC5_OSCOUT), 778 PINMUX_IPSR_DATA(IP2_11_8, CC5_OSCOUT),
780 PINMUX_IPSR_MODSEL_DATA(IP2_15_12, HRTS0, SEL_HSCIF0_0), 779 PINMUX_IPSR_MSEL(IP2_15_12, HRTS0, SEL_HSCIF0_0),
781 PINMUX_IPSR_MODSEL_DATA(IP2_15_12, RTS1_TANS, SEL_SCIF1_0), 780 PINMUX_IPSR_MSEL(IP2_15_12, RTS1_TANS, SEL_SCIF1_0),
782 PINMUX_IPSR_DATA(IP2_15_12, MDATA), 781 PINMUX_IPSR_DATA(IP2_15_12, MDATA),
783 PINMUX_IPSR_DATA(IP2_15_12, TX0_C), 782 PINMUX_IPSR_DATA(IP2_15_12, TX0_C),
784 PINMUX_IPSR_DATA(IP2_15_12, SUB_TMS), 783 PINMUX_IPSR_DATA(IP2_15_12, SUB_TMS),
@@ -789,17 +788,17 @@ static const u16 pinmux_data[] = {
789 PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE33), 788 PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE33),
790 PINMUX_IPSR_DATA(IP2_18_16, DU0_DR0), 789 PINMUX_IPSR_DATA(IP2_18_16, DU0_DR0),
791 PINMUX_IPSR_DATA(IP2_18_16, LCDOUT0), 790 PINMUX_IPSR_DATA(IP2_18_16, LCDOUT0),
792 PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DREQ0, SEL_EXBUS0_0), 791 PINMUX_IPSR_MSEL(IP2_18_16, DREQ0, SEL_EXBUS0_0),
793 PINMUX_IPSR_MODSEL_DATA(IP2_18_16, GPS_CLK_B, SEL_GPS_1), 792 PINMUX_IPSR_MSEL(IP2_18_16, GPS_CLK_B, SEL_GPS_1),
794 PINMUX_IPSR_DATA(IP2_18_16, AUDATA0), 793 PINMUX_IPSR_DATA(IP2_18_16, AUDATA0),
795 PINMUX_IPSR_DATA(IP2_18_16, TX5_C), 794 PINMUX_IPSR_DATA(IP2_18_16, TX5_C),
796 PINMUX_IPSR_DATA(IP2_21_19, DU0_DR1), 795 PINMUX_IPSR_DATA(IP2_21_19, DU0_DR1),
797 PINMUX_IPSR_DATA(IP2_21_19, LCDOUT1), 796 PINMUX_IPSR_DATA(IP2_21_19, LCDOUT1),
798 PINMUX_IPSR_DATA(IP2_21_19, DACK0), 797 PINMUX_IPSR_DATA(IP2_21_19, DACK0),
799 PINMUX_IPSR_DATA(IP2_21_19, DRACK0), 798 PINMUX_IPSR_DATA(IP2_21_19, DRACK0),
800 PINMUX_IPSR_MODSEL_DATA(IP2_21_19, GPS_SIGN_B, SEL_GPS_1), 799 PINMUX_IPSR_MSEL(IP2_21_19, GPS_SIGN_B, SEL_GPS_1),
801 PINMUX_IPSR_DATA(IP2_21_19, AUDATA1), 800 PINMUX_IPSR_DATA(IP2_21_19, AUDATA1),
802 PINMUX_IPSR_MODSEL_DATA(IP2_21_19, RX5_C, SEL_SCIF5_2), 801 PINMUX_IPSR_MSEL(IP2_21_19, RX5_C, SEL_SCIF5_2),
803 PINMUX_IPSR_DATA(IP2_22, DU0_DR2), 802 PINMUX_IPSR_DATA(IP2_22, DU0_DR2),
804 PINMUX_IPSR_DATA(IP2_22, LCDOUT2), 803 PINMUX_IPSR_DATA(IP2_22, LCDOUT2),
805 PINMUX_IPSR_DATA(IP2_23, DU0_DR3), 804 PINMUX_IPSR_DATA(IP2_23, DU0_DR3),
@@ -814,14 +813,14 @@ static const u16 pinmux_data[] = {
814 PINMUX_IPSR_DATA(IP2_27, LCDOUT7), 813 PINMUX_IPSR_DATA(IP2_27, LCDOUT7),
815 PINMUX_IPSR_DATA(IP2_30_28, DU0_DG0), 814 PINMUX_IPSR_DATA(IP2_30_28, DU0_DG0),
816 PINMUX_IPSR_DATA(IP2_30_28, LCDOUT8), 815 PINMUX_IPSR_DATA(IP2_30_28, LCDOUT8),
817 PINMUX_IPSR_MODSEL_DATA(IP2_30_28, DREQ1, SEL_EXBUS1_0), 816 PINMUX_IPSR_MSEL(IP2_30_28, DREQ1, SEL_EXBUS1_0),
818 PINMUX_IPSR_MODSEL_DATA(IP2_30_28, SCL2, SEL_I2C2_0), 817 PINMUX_IPSR_MSEL(IP2_30_28, SCL2, SEL_I2C2_0),
819 PINMUX_IPSR_DATA(IP2_30_28, AUDATA2), 818 PINMUX_IPSR_DATA(IP2_30_28, AUDATA2),
820 819
821 PINMUX_IPSR_DATA(IP3_2_0, DU0_DG1), 820 PINMUX_IPSR_DATA(IP3_2_0, DU0_DG1),
822 PINMUX_IPSR_DATA(IP3_2_0, LCDOUT9), 821 PINMUX_IPSR_DATA(IP3_2_0, LCDOUT9),
823 PINMUX_IPSR_DATA(IP3_2_0, DACK1), 822 PINMUX_IPSR_DATA(IP3_2_0, DACK1),
824 PINMUX_IPSR_MODSEL_DATA(IP3_2_0, SDA2, SEL_I2C2_0), 823 PINMUX_IPSR_MSEL(IP3_2_0, SDA2, SEL_I2C2_0),
825 PINMUX_IPSR_DATA(IP3_2_0, AUDATA3), 824 PINMUX_IPSR_DATA(IP3_2_0, AUDATA3),
826 PINMUX_IPSR_DATA(IP3_3, DU0_DG2), 825 PINMUX_IPSR_DATA(IP3_3, DU0_DG2),
827 PINMUX_IPSR_DATA(IP3_3, LCDOUT10), 826 PINMUX_IPSR_DATA(IP3_3, LCDOUT10),
@@ -838,16 +837,16 @@ static const u16 pinmux_data[] = {
838 PINMUX_IPSR_DATA(IP3_11_9, DU0_DB0), 837 PINMUX_IPSR_DATA(IP3_11_9, DU0_DB0),
839 PINMUX_IPSR_DATA(IP3_11_9, LCDOUT16), 838 PINMUX_IPSR_DATA(IP3_11_9, LCDOUT16),
840 PINMUX_IPSR_DATA(IP3_11_9, EX_WAIT1), 839 PINMUX_IPSR_DATA(IP3_11_9, EX_WAIT1),
841 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SCL1, SEL_I2C1_0), 840 PINMUX_IPSR_MSEL(IP3_11_9, SCL1, SEL_I2C1_0),
842 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, TCLK1, SEL_TMU1_0), 841 PINMUX_IPSR_MSEL(IP3_11_9, TCLK1, SEL_TMU1_0),
843 PINMUX_IPSR_DATA(IP3_11_9, AUDATA4), 842 PINMUX_IPSR_DATA(IP3_11_9, AUDATA4),
844 PINMUX_IPSR_DATA(IP3_14_12, DU0_DB1), 843 PINMUX_IPSR_DATA(IP3_14_12, DU0_DB1),
845 PINMUX_IPSR_DATA(IP3_14_12, LCDOUT17), 844 PINMUX_IPSR_DATA(IP3_14_12, LCDOUT17),
846 PINMUX_IPSR_DATA(IP3_14_12, EX_WAIT2), 845 PINMUX_IPSR_DATA(IP3_14_12, EX_WAIT2),
847 PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SDA1, SEL_I2C1_0), 846 PINMUX_IPSR_MSEL(IP3_14_12, SDA1, SEL_I2C1_0),
848 PINMUX_IPSR_MODSEL_DATA(IP3_14_12, GPS_MAG_B, SEL_GPS_1), 847 PINMUX_IPSR_MSEL(IP3_14_12, GPS_MAG_B, SEL_GPS_1),
849 PINMUX_IPSR_DATA(IP3_14_12, AUDATA5), 848 PINMUX_IPSR_DATA(IP3_14_12, AUDATA5),
850 PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SCK5_C, SEL_SCIF5_2), 849 PINMUX_IPSR_MSEL(IP3_14_12, SCK5_C, SEL_SCIF5_2),
851 PINMUX_IPSR_DATA(IP3_15, DU0_DB2), 850 PINMUX_IPSR_DATA(IP3_15, DU0_DB2),
852 PINMUX_IPSR_DATA(IP3_15, LCDOUT18), 851 PINMUX_IPSR_DATA(IP3_15, LCDOUT18),
853 PINMUX_IPSR_DATA(IP3_16, DU0_DB3), 852 PINMUX_IPSR_DATA(IP3_16, DU0_DB3),
@@ -863,14 +862,14 @@ static const u16 pinmux_data[] = {
863 PINMUX_IPSR_DATA(IP3_22_21, DU0_DOTCLKIN), 862 PINMUX_IPSR_DATA(IP3_22_21, DU0_DOTCLKIN),
864 PINMUX_IPSR_DATA(IP3_22_21, QSTVA_QVS), 863 PINMUX_IPSR_DATA(IP3_22_21, QSTVA_QVS),
865 PINMUX_IPSR_DATA(IP3_22_21, TX3_D_IRDA_TX_D), 864 PINMUX_IPSR_DATA(IP3_22_21, TX3_D_IRDA_TX_D),
866 PINMUX_IPSR_MODSEL_DATA(IP3_22_21, SCL3_B, SEL_I2C3_1), 865 PINMUX_IPSR_MSEL(IP3_22_21, SCL3_B, SEL_I2C3_1),
867 PINMUX_IPSR_DATA(IP3_23, DU0_DOTCLKOUT0), 866 PINMUX_IPSR_DATA(IP3_23, DU0_DOTCLKOUT0),
868 PINMUX_IPSR_DATA(IP3_23, QCLK), 867 PINMUX_IPSR_DATA(IP3_23, QCLK),
869 PINMUX_IPSR_DATA(IP3_26_24, DU0_DOTCLKOUT1), 868 PINMUX_IPSR_DATA(IP3_26_24, DU0_DOTCLKOUT1),
870 PINMUX_IPSR_DATA(IP3_26_24, QSTVB_QVE), 869 PINMUX_IPSR_DATA(IP3_26_24, QSTVB_QVE),
871 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3), 870 PINMUX_IPSR_MSEL(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3),
872 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA3_B, SEL_I2C3_1), 871 PINMUX_IPSR_MSEL(IP3_26_24, SDA3_B, SEL_I2C3_1),
873 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SDA2_C, SEL_I2C2_2), 872 PINMUX_IPSR_MSEL(IP3_26_24, SDA2_C, SEL_I2C2_2),
874 PINMUX_IPSR_DATA(IP3_26_24, DACK0_B), 873 PINMUX_IPSR_DATA(IP3_26_24, DACK0_B),
875 PINMUX_IPSR_DATA(IP3_26_24, DRACK0_B), 874 PINMUX_IPSR_DATA(IP3_26_24, DRACK0_B),
876 PINMUX_IPSR_DATA(IP3_27, DU0_EXHSYNC_DU0_HSYNC), 875 PINMUX_IPSR_DATA(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
@@ -881,34 +880,34 @@ static const u16 pinmux_data[] = {
881 PINMUX_IPSR_DATA(IP3_31_29, QCPV_QDE), 880 PINMUX_IPSR_DATA(IP3_31_29, QCPV_QDE),
882 PINMUX_IPSR_DATA(IP3_31_29, CAN1_TX), 881 PINMUX_IPSR_DATA(IP3_31_29, CAN1_TX),
883 PINMUX_IPSR_DATA(IP3_31_29, TX2_C), 882 PINMUX_IPSR_DATA(IP3_31_29, TX2_C),
884 PINMUX_IPSR_MODSEL_DATA(IP3_31_29, SCL2_C, SEL_I2C2_2), 883 PINMUX_IPSR_MSEL(IP3_31_29, SCL2_C, SEL_I2C2_2),
885 PINMUX_IPSR_DATA(IP3_31_29, REMOCON), 884 PINMUX_IPSR_DATA(IP3_31_29, REMOCON),
886 885
887 PINMUX_IPSR_DATA(IP4_1_0, DU0_DISP), 886 PINMUX_IPSR_DATA(IP4_1_0, DU0_DISP),
888 PINMUX_IPSR_DATA(IP4_1_0, QPOLA), 887 PINMUX_IPSR_DATA(IP4_1_0, QPOLA),
889 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2), 888 PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2),
890 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCK2_C, SEL_SCIF2_2), 889 PINMUX_IPSR_MSEL(IP4_1_0, SCK2_C, SEL_SCIF2_2),
891 PINMUX_IPSR_DATA(IP4_4_2, DU0_CDE), 890 PINMUX_IPSR_DATA(IP4_4_2, DU0_CDE),
892 PINMUX_IPSR_DATA(IP4_4_2, QPOLB), 891 PINMUX_IPSR_DATA(IP4_4_2, QPOLB),
893 PINMUX_IPSR_DATA(IP4_4_2, CAN1_RX), 892 PINMUX_IPSR_DATA(IP4_4_2, CAN1_RX),
894 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, RX2_C, SEL_SCIF2_2), 893 PINMUX_IPSR_MSEL(IP4_4_2, RX2_C, SEL_SCIF2_2),
895 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, DREQ0_B, SEL_EXBUS0_1), 894 PINMUX_IPSR_MSEL(IP4_4_2, DREQ0_B, SEL_EXBUS0_1),
896 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1), 895 PINMUX_IPSR_MSEL(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1),
897 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SCK0_B, SEL_SCIF0_1), 896 PINMUX_IPSR_MSEL(IP4_4_2, SCK0_B, SEL_SCIF0_1),
898 PINMUX_IPSR_DATA(IP4_7_5, DU1_DR0), 897 PINMUX_IPSR_DATA(IP4_7_5, DU1_DR0),
899 PINMUX_IPSR_DATA(IP4_7_5, VI2_DATA0_VI2_B0), 898 PINMUX_IPSR_DATA(IP4_7_5, VI2_DATA0_VI2_B0),
900 PINMUX_IPSR_DATA(IP4_7_5, PWM6), 899 PINMUX_IPSR_DATA(IP4_7_5, PWM6),
901 PINMUX_IPSR_DATA(IP4_7_5, SD3_CLK), 900 PINMUX_IPSR_DATA(IP4_7_5, SD3_CLK),
902 PINMUX_IPSR_DATA(IP4_7_5, TX3_E_IRDA_TX_E), 901 PINMUX_IPSR_DATA(IP4_7_5, TX3_E_IRDA_TX_E),
903 PINMUX_IPSR_DATA(IP4_7_5, AUDCK), 902 PINMUX_IPSR_DATA(IP4_7_5, AUDCK),
904 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1), 903 PINMUX_IPSR_MSEL(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1),
905 PINMUX_IPSR_DATA(IP4_10_8, DU1_DR1), 904 PINMUX_IPSR_DATA(IP4_10_8, DU1_DR1),
906 PINMUX_IPSR_DATA(IP4_10_8, VI2_DATA1_VI2_B1), 905 PINMUX_IPSR_DATA(IP4_10_8, VI2_DATA1_VI2_B1),
907 PINMUX_IPSR_DATA(IP4_10_8, PWM0), 906 PINMUX_IPSR_DATA(IP4_10_8, PWM0),
908 PINMUX_IPSR_DATA(IP4_10_8, SD3_CMD), 907 PINMUX_IPSR_DATA(IP4_10_8, SD3_CMD),
909 PINMUX_IPSR_MODSEL_DATA(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4), 908 PINMUX_IPSR_MSEL(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4),
910 PINMUX_IPSR_DATA(IP4_10_8, AUDSYNC), 909 PINMUX_IPSR_DATA(IP4_10_8, AUDSYNC),
911 PINMUX_IPSR_MODSEL_DATA(IP4_10_8, CTS0_D, SEL_SCIF0_3), 910 PINMUX_IPSR_MSEL(IP4_10_8, CTS0_D, SEL_SCIF0_3),
912 PINMUX_IPSR_DATA(IP4_11, DU1_DR2), 911 PINMUX_IPSR_DATA(IP4_11, DU1_DR2),
913 PINMUX_IPSR_DATA(IP4_11, VI2_G0), 912 PINMUX_IPSR_DATA(IP4_11, VI2_G0),
914 PINMUX_IPSR_DATA(IP4_12, DU1_DR3), 913 PINMUX_IPSR_DATA(IP4_12, DU1_DR3),
@@ -923,18 +922,18 @@ static const u16 pinmux_data[] = {
923 PINMUX_IPSR_DATA(IP4_16, VI2_G5), 922 PINMUX_IPSR_DATA(IP4_16, VI2_G5),
924 PINMUX_IPSR_DATA(IP4_19_17, DU1_DG0), 923 PINMUX_IPSR_DATA(IP4_19_17, DU1_DG0),
925 PINMUX_IPSR_DATA(IP4_19_17, VI2_DATA2_VI2_B2), 924 PINMUX_IPSR_DATA(IP4_19_17, VI2_DATA2_VI2_B2),
926 PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCL1_B, SEL_I2C1_1), 925 PINMUX_IPSR_MSEL(IP4_19_17, SCL1_B, SEL_I2C1_1),
927 PINMUX_IPSR_DATA(IP4_19_17, SD3_DAT2), 926 PINMUX_IPSR_DATA(IP4_19_17, SD3_DAT2),
928 PINMUX_IPSR_MODSEL_DATA(IP4_19_17, SCK3_E, SEL_SCIF3_4), 927 PINMUX_IPSR_MSEL(IP4_19_17, SCK3_E, SEL_SCIF3_4),
929 PINMUX_IPSR_DATA(IP4_19_17, AUDATA6), 928 PINMUX_IPSR_DATA(IP4_19_17, AUDATA6),
930 PINMUX_IPSR_DATA(IP4_19_17, TX0_D), 929 PINMUX_IPSR_DATA(IP4_19_17, TX0_D),
931 PINMUX_IPSR_DATA(IP4_22_20, DU1_DG1), 930 PINMUX_IPSR_DATA(IP4_22_20, DU1_DG1),
932 PINMUX_IPSR_DATA(IP4_22_20, VI2_DATA3_VI2_B3), 931 PINMUX_IPSR_DATA(IP4_22_20, VI2_DATA3_VI2_B3),
933 PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SDA1_B, SEL_I2C1_1), 932 PINMUX_IPSR_MSEL(IP4_22_20, SDA1_B, SEL_I2C1_1),
934 PINMUX_IPSR_DATA(IP4_22_20, SD3_DAT3), 933 PINMUX_IPSR_DATA(IP4_22_20, SD3_DAT3),
935 PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SCK5, SEL_SCIF5_0), 934 PINMUX_IPSR_MSEL(IP4_22_20, SCK5, SEL_SCIF5_0),
936 PINMUX_IPSR_DATA(IP4_22_20, AUDATA7), 935 PINMUX_IPSR_DATA(IP4_22_20, AUDATA7),
937 PINMUX_IPSR_MODSEL_DATA(IP4_22_20, RX0_D, SEL_SCIF0_3), 936 PINMUX_IPSR_MSEL(IP4_22_20, RX0_D, SEL_SCIF0_3),
938 PINMUX_IPSR_DATA(IP4_23, DU1_DG2), 937 PINMUX_IPSR_DATA(IP4_23, DU1_DG2),
939 PINMUX_IPSR_DATA(IP4_23, VI2_G6), 938 PINMUX_IPSR_DATA(IP4_23, VI2_G6),
940 PINMUX_IPSR_DATA(IP4_24, DU1_DG3), 939 PINMUX_IPSR_DATA(IP4_24, DU1_DG3),
@@ -949,17 +948,17 @@ static const u16 pinmux_data[] = {
949 PINMUX_IPSR_DATA(IP4_28, VI2_R3), 948 PINMUX_IPSR_DATA(IP4_28, VI2_R3),
950 PINMUX_IPSR_DATA(IP4_31_29, DU1_DB0), 949 PINMUX_IPSR_DATA(IP4_31_29, DU1_DB0),
951 PINMUX_IPSR_DATA(IP4_31_29, VI2_DATA4_VI2_B4), 950 PINMUX_IPSR_DATA(IP4_31_29, VI2_DATA4_VI2_B4),
952 PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCL2_B, SEL_I2C2_1), 951 PINMUX_IPSR_MSEL(IP4_31_29, SCL2_B, SEL_I2C2_1),
953 PINMUX_IPSR_DATA(IP4_31_29, SD3_DAT0), 952 PINMUX_IPSR_DATA(IP4_31_29, SD3_DAT0),
954 PINMUX_IPSR_DATA(IP4_31_29, TX5), 953 PINMUX_IPSR_DATA(IP4_31_29, TX5),
955 PINMUX_IPSR_MODSEL_DATA(IP4_31_29, SCK0_D, SEL_SCIF0_3), 954 PINMUX_IPSR_MSEL(IP4_31_29, SCK0_D, SEL_SCIF0_3),
956 955
957 PINMUX_IPSR_DATA(IP5_2_0, DU1_DB1), 956 PINMUX_IPSR_DATA(IP5_2_0, DU1_DB1),
958 PINMUX_IPSR_DATA(IP5_2_0, VI2_DATA5_VI2_B5), 957 PINMUX_IPSR_DATA(IP5_2_0, VI2_DATA5_VI2_B5),
959 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SDA2_B, SEL_I2C2_1), 958 PINMUX_IPSR_MSEL(IP5_2_0, SDA2_B, SEL_I2C2_1),
960 PINMUX_IPSR_DATA(IP5_2_0, SD3_DAT1), 959 PINMUX_IPSR_DATA(IP5_2_0, SD3_DAT1),
961 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX5, SEL_SCIF5_0), 960 PINMUX_IPSR_MSEL(IP5_2_0, RX5, SEL_SCIF5_0),
962 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3), 961 PINMUX_IPSR_MSEL(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3),
963 PINMUX_IPSR_DATA(IP5_3, DU1_DB2), 962 PINMUX_IPSR_DATA(IP5_3, DU1_DB2),
964 PINMUX_IPSR_DATA(IP5_3, VI2_R4), 963 PINMUX_IPSR_DATA(IP5_3, VI2_R4),
965 PINMUX_IPSR_DATA(IP5_4, DU1_DB3), 964 PINMUX_IPSR_DATA(IP5_4, DU1_DB3),
@@ -969,16 +968,16 @@ static const u16 pinmux_data[] = {
969 PINMUX_IPSR_DATA(IP5_6, DU1_DB5), 968 PINMUX_IPSR_DATA(IP5_6, DU1_DB5),
970 PINMUX_IPSR_DATA(IP5_6, VI2_R7), 969 PINMUX_IPSR_DATA(IP5_6, VI2_R7),
971 PINMUX_IPSR_DATA(IP5_7, DU1_DB6), 970 PINMUX_IPSR_DATA(IP5_7, DU1_DB6),
972 PINMUX_IPSR_MODSEL_DATA(IP5_7, SCL2_D, SEL_I2C2_3), 971 PINMUX_IPSR_MSEL(IP5_7, SCL2_D, SEL_I2C2_3),
973 PINMUX_IPSR_DATA(IP5_8, DU1_DB7), 972 PINMUX_IPSR_DATA(IP5_8, DU1_DB7),
974 PINMUX_IPSR_MODSEL_DATA(IP5_8, SDA2_D, SEL_I2C2_3), 973 PINMUX_IPSR_MSEL(IP5_8, SDA2_D, SEL_I2C2_3),
975 PINMUX_IPSR_DATA(IP5_10_9, DU1_DOTCLKIN), 974 PINMUX_IPSR_DATA(IP5_10_9, DU1_DOTCLKIN),
976 PINMUX_IPSR_DATA(IP5_10_9, VI2_CLKENB), 975 PINMUX_IPSR_DATA(IP5_10_9, VI2_CLKENB),
977 PINMUX_IPSR_MODSEL_DATA(IP5_10_9, HSPI_CS1, SEL_HSPI1_0), 976 PINMUX_IPSR_MSEL(IP5_10_9, HSPI_CS1, SEL_HSPI1_0),
978 PINMUX_IPSR_MODSEL_DATA(IP5_10_9, SCL1_D, SEL_I2C1_3), 977 PINMUX_IPSR_MSEL(IP5_10_9, SCL1_D, SEL_I2C1_3),
979 PINMUX_IPSR_DATA(IP5_12_11, DU1_DOTCLKOUT), 978 PINMUX_IPSR_DATA(IP5_12_11, DU1_DOTCLKOUT),
980 PINMUX_IPSR_DATA(IP5_12_11, VI2_FIELD), 979 PINMUX_IPSR_DATA(IP5_12_11, VI2_FIELD),
981 PINMUX_IPSR_MODSEL_DATA(IP5_12_11, SDA1_D, SEL_I2C1_3), 980 PINMUX_IPSR_MSEL(IP5_12_11, SDA1_D, SEL_I2C1_3),
982 PINMUX_IPSR_DATA(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC), 981 PINMUX_IPSR_DATA(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
983 PINMUX_IPSR_DATA(IP5_14_13, VI2_HSYNC), 982 PINMUX_IPSR_DATA(IP5_14_13, VI2_HSYNC),
984 PINMUX_IPSR_DATA(IP5_14_13, VI3_HSYNC), 983 PINMUX_IPSR_DATA(IP5_14_13, VI3_HSYNC),
@@ -995,26 +994,26 @@ static const u16 pinmux_data[] = {
995 PINMUX_IPSR_DATA(IP5_20_17, AUDIO_CLKC), 994 PINMUX_IPSR_DATA(IP5_20_17, AUDIO_CLKC),
996 PINMUX_IPSR_DATA(IP5_20_17, TX2_D), 995 PINMUX_IPSR_DATA(IP5_20_17, TX2_D),
997 PINMUX_IPSR_DATA(IP5_20_17, SPEEDIN), 996 PINMUX_IPSR_DATA(IP5_20_17, SPEEDIN),
998 PINMUX_IPSR_MODSEL_DATA(IP5_20_17, GPS_SIGN_D, SEL_GPS_3), 997 PINMUX_IPSR_MSEL(IP5_20_17, GPS_SIGN_D, SEL_GPS_3),
999 PINMUX_IPSR_DATA(IP5_23_21, DU1_DISP), 998 PINMUX_IPSR_DATA(IP5_23_21, DU1_DISP),
1000 PINMUX_IPSR_DATA(IP5_23_21, VI2_DATA6_VI2_B6), 999 PINMUX_IPSR_DATA(IP5_23_21, VI2_DATA6_VI2_B6),
1001 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, TCLK0, SEL_TMU0_0), 1000 PINMUX_IPSR_MSEL(IP5_23_21, TCLK0, SEL_TMU0_0),
1002 PINMUX_IPSR_DATA(IP5_23_21, QSTVA_B_QVS_B), 1001 PINMUX_IPSR_DATA(IP5_23_21, QSTVA_B_QVS_B),
1003 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0), 1002 PINMUX_IPSR_MSEL(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0),
1004 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, SCK2_D, SEL_SCIF2_3), 1003 PINMUX_IPSR_MSEL(IP5_23_21, SCK2_D, SEL_SCIF2_3),
1005 PINMUX_IPSR_DATA(IP5_23_21, AUDIO_CLKOUT_B), 1004 PINMUX_IPSR_DATA(IP5_23_21, AUDIO_CLKOUT_B),
1006 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, GPS_MAG_D, SEL_GPS_3), 1005 PINMUX_IPSR_MSEL(IP5_23_21, GPS_MAG_D, SEL_GPS_3),
1007 PINMUX_IPSR_DATA(IP5_27_24, DU1_CDE), 1006 PINMUX_IPSR_DATA(IP5_27_24, DU1_CDE),
1008 PINMUX_IPSR_DATA(IP5_27_24, VI2_DATA7_VI2_B7), 1007 PINMUX_IPSR_DATA(IP5_27_24, VI2_DATA7_VI2_B7),
1009 PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1), 1008 PINMUX_IPSR_MSEL(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1),
1010 PINMUX_IPSR_DATA(IP5_27_24, SD3_WP), 1009 PINMUX_IPSR_DATA(IP5_27_24, SD3_WP),
1011 PINMUX_IPSR_MODSEL_DATA(IP5_27_24, HSPI_RX1, SEL_HSPI1_0), 1010 PINMUX_IPSR_MSEL(IP5_27_24, HSPI_RX1, SEL_HSPI1_0),
1012 PINMUX_IPSR_DATA(IP5_27_24, VI1_FIELD), 1011 PINMUX_IPSR_DATA(IP5_27_24, VI1_FIELD),
1013 PINMUX_IPSR_DATA(IP5_27_24, VI3_FIELD), 1012 PINMUX_IPSR_DATA(IP5_27_24, VI3_FIELD),
1014 PINMUX_IPSR_DATA(IP5_27_24, AUDIO_CLKOUT), 1013 PINMUX_IPSR_DATA(IP5_27_24, AUDIO_CLKOUT),
1015 PINMUX_IPSR_MODSEL_DATA(IP5_27_24, RX2_D, SEL_SCIF2_3), 1014 PINMUX_IPSR_MSEL(IP5_27_24, RX2_D, SEL_SCIF2_3),
1016 PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_C, SEL_GPS_2), 1015 PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_C, SEL_GPS_2),
1017 PINMUX_IPSR_MODSEL_DATA(IP5_27_24, GPS_CLK_D, SEL_GPS_3), 1016 PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_D, SEL_GPS_3),
1018 PINMUX_IPSR_DATA(IP5_28, AUDIO_CLKA), 1017 PINMUX_IPSR_DATA(IP5_28, AUDIO_CLKA),
1019 PINMUX_IPSR_DATA(IP5_28, CAN_TXCLK), 1018 PINMUX_IPSR_DATA(IP5_28, CAN_TXCLK),
1020 PINMUX_IPSR_DATA(IP5_30_29, AUDIO_CLKB), 1019 PINMUX_IPSR_DATA(IP5_30_29, AUDIO_CLKB),
@@ -1039,82 +1038,82 @@ static const u16 pinmux_data[] = {
1039 PINMUX_IPSR_DATA(IP6_11_9, SSI_SCK34), 1038 PINMUX_IPSR_DATA(IP6_11_9, SSI_SCK34),
1040 PINMUX_IPSR_DATA(IP6_11_9, CAN_DEBUGOUT6), 1039 PINMUX_IPSR_DATA(IP6_11_9, CAN_DEBUGOUT6),
1041 PINMUX_IPSR_DATA(IP6_11_9, CAN0_TX_B), 1040 PINMUX_IPSR_DATA(IP6_11_9, CAN0_TX_B),
1042 PINMUX_IPSR_MODSEL_DATA(IP6_11_9, IERX, SEL_IE_0), 1041 PINMUX_IPSR_MSEL(IP6_11_9, IERX, SEL_IE_0),
1043 PINMUX_IPSR_MODSEL_DATA(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2), 1042 PINMUX_IPSR_MSEL(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2),
1044 PINMUX_IPSR_DATA(IP6_14_12, SSI_WS34), 1043 PINMUX_IPSR_DATA(IP6_14_12, SSI_WS34),
1045 PINMUX_IPSR_DATA(IP6_14_12, CAN_DEBUGOUT7), 1044 PINMUX_IPSR_DATA(IP6_14_12, CAN_DEBUGOUT7),
1046 PINMUX_IPSR_MODSEL_DATA(IP6_14_12, CAN0_RX_B, SEL_CAN0_1), 1045 PINMUX_IPSR_MSEL(IP6_14_12, CAN0_RX_B, SEL_CAN0_1),
1047 PINMUX_IPSR_DATA(IP6_14_12, IETX), 1046 PINMUX_IPSR_DATA(IP6_14_12, IETX),
1048 PINMUX_IPSR_MODSEL_DATA(IP6_14_12, SSI_WS9_C, SEL_SSI9_2), 1047 PINMUX_IPSR_MSEL(IP6_14_12, SSI_WS9_C, SEL_SSI9_2),
1049 PINMUX_IPSR_DATA(IP6_17_15, SSI_SDATA3), 1048 PINMUX_IPSR_DATA(IP6_17_15, SSI_SDATA3),
1050 PINMUX_IPSR_DATA(IP6_17_15, PWM0_C), 1049 PINMUX_IPSR_DATA(IP6_17_15, PWM0_C),
1051 PINMUX_IPSR_DATA(IP6_17_15, CAN_DEBUGOUT8), 1050 PINMUX_IPSR_DATA(IP6_17_15, CAN_DEBUGOUT8),
1052 PINMUX_IPSR_MODSEL_DATA(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1), 1051 PINMUX_IPSR_MSEL(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1),
1053 PINMUX_IPSR_MODSEL_DATA(IP6_17_15, IECLK, SEL_IE_0), 1052 PINMUX_IPSR_MSEL(IP6_17_15, IECLK, SEL_IE_0),
1054 PINMUX_IPSR_MODSEL_DATA(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1), 1053 PINMUX_IPSR_MSEL(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1),
1055 PINMUX_IPSR_MODSEL_DATA(IP6_17_15, TCLK0_B, SEL_TMU0_1), 1054 PINMUX_IPSR_MSEL(IP6_17_15, TCLK0_B, SEL_TMU0_1),
1056 PINMUX_IPSR_DATA(IP6_19_18, SSI_SDATA4), 1055 PINMUX_IPSR_DATA(IP6_19_18, SSI_SDATA4),
1057 PINMUX_IPSR_DATA(IP6_19_18, CAN_DEBUGOUT9), 1056 PINMUX_IPSR_DATA(IP6_19_18, CAN_DEBUGOUT9),
1058 PINMUX_IPSR_MODSEL_DATA(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2), 1057 PINMUX_IPSR_MSEL(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2),
1059 PINMUX_IPSR_DATA(IP6_22_20, SSI_SCK5), 1058 PINMUX_IPSR_DATA(IP6_22_20, SSI_SCK5),
1060 PINMUX_IPSR_DATA(IP6_22_20, ADICLK), 1059 PINMUX_IPSR_DATA(IP6_22_20, ADICLK),
1061 PINMUX_IPSR_DATA(IP6_22_20, CAN_DEBUGOUT10), 1060 PINMUX_IPSR_DATA(IP6_22_20, CAN_DEBUGOUT10),
1062 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCK3, SEL_SCIF3_0), 1061 PINMUX_IPSR_MSEL(IP6_22_20, SCK3, SEL_SCIF3_0),
1063 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, TCLK0_D, SEL_TMU0_3), 1062 PINMUX_IPSR_MSEL(IP6_22_20, TCLK0_D, SEL_TMU0_3),
1064 PINMUX_IPSR_DATA(IP6_24_23, SSI_WS5), 1063 PINMUX_IPSR_DATA(IP6_24_23, SSI_WS5),
1065 PINMUX_IPSR_MODSEL_DATA(IP6_24_23, ADICS_SAMP, SEL_ADI_0), 1064 PINMUX_IPSR_MSEL(IP6_24_23, ADICS_SAMP, SEL_ADI_0),
1066 PINMUX_IPSR_DATA(IP6_24_23, CAN_DEBUGOUT11), 1065 PINMUX_IPSR_DATA(IP6_24_23, CAN_DEBUGOUT11),
1067 PINMUX_IPSR_DATA(IP6_24_23, TX3_IRDA_TX), 1066 PINMUX_IPSR_DATA(IP6_24_23, TX3_IRDA_TX),
1068 PINMUX_IPSR_DATA(IP6_26_25, SSI_SDATA5), 1067 PINMUX_IPSR_DATA(IP6_26_25, SSI_SDATA5),
1069 PINMUX_IPSR_MODSEL_DATA(IP6_26_25, ADIDATA, SEL_ADI_0), 1068 PINMUX_IPSR_MSEL(IP6_26_25, ADIDATA, SEL_ADI_0),
1070 PINMUX_IPSR_DATA(IP6_26_25, CAN_DEBUGOUT12), 1069 PINMUX_IPSR_DATA(IP6_26_25, CAN_DEBUGOUT12),
1071 PINMUX_IPSR_MODSEL_DATA(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0), 1070 PINMUX_IPSR_MSEL(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0),
1072 PINMUX_IPSR_DATA(IP6_30_29, SSI_SCK6), 1071 PINMUX_IPSR_DATA(IP6_30_29, SSI_SCK6),
1073 PINMUX_IPSR_DATA(IP6_30_29, ADICHS0), 1072 PINMUX_IPSR_DATA(IP6_30_29, ADICHS0),
1074 PINMUX_IPSR_DATA(IP6_30_29, CAN0_TX), 1073 PINMUX_IPSR_DATA(IP6_30_29, CAN0_TX),
1075 PINMUX_IPSR_MODSEL_DATA(IP6_30_29, IERX_B, SEL_IE_1), 1074 PINMUX_IPSR_MSEL(IP6_30_29, IERX_B, SEL_IE_1),
1076 1075
1077 PINMUX_IPSR_DATA(IP7_1_0, SSI_WS6), 1076 PINMUX_IPSR_DATA(IP7_1_0, SSI_WS6),
1078 PINMUX_IPSR_DATA(IP7_1_0, ADICHS1), 1077 PINMUX_IPSR_DATA(IP7_1_0, ADICHS1),
1079 PINMUX_IPSR_MODSEL_DATA(IP7_1_0, CAN0_RX, SEL_CAN0_0), 1078 PINMUX_IPSR_MSEL(IP7_1_0, CAN0_RX, SEL_CAN0_0),
1080 PINMUX_IPSR_DATA(IP7_1_0, IETX_B), 1079 PINMUX_IPSR_DATA(IP7_1_0, IETX_B),
1081 PINMUX_IPSR_DATA(IP7_3_2, SSI_SDATA6), 1080 PINMUX_IPSR_DATA(IP7_3_2, SSI_SDATA6),
1082 PINMUX_IPSR_DATA(IP7_3_2, ADICHS2), 1081 PINMUX_IPSR_DATA(IP7_3_2, ADICHS2),
1083 PINMUX_IPSR_MODSEL_DATA(IP7_3_2, CAN_CLK, SEL_CANCLK_0), 1082 PINMUX_IPSR_MSEL(IP7_3_2, CAN_CLK, SEL_CANCLK_0),
1084 PINMUX_IPSR_MODSEL_DATA(IP7_3_2, IECLK_B, SEL_IE_1), 1083 PINMUX_IPSR_MSEL(IP7_3_2, IECLK_B, SEL_IE_1),
1085 PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK78, SEL_SSI7_0), 1084 PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK78, SEL_SSI7_0),
1086 PINMUX_IPSR_DATA(IP7_6_4, CAN_DEBUGOUT13), 1085 PINMUX_IPSR_DATA(IP7_6_4, CAN_DEBUGOUT13),
1087 PINMUX_IPSR_MODSEL_DATA(IP7_6_4, IRQ0_B, SEL_INT0_1), 1086 PINMUX_IPSR_MSEL(IP7_6_4, IRQ0_B, SEL_INT0_1),
1088 PINMUX_IPSR_MODSEL_DATA(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1), 1087 PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1),
1089 PINMUX_IPSR_MODSEL_DATA(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2), 1088 PINMUX_IPSR_MSEL(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2),
1090 PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS78, SEL_SSI7_0), 1089 PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS78, SEL_SSI7_0),
1091 PINMUX_IPSR_DATA(IP7_9_7, CAN_DEBUGOUT14), 1090 PINMUX_IPSR_DATA(IP7_9_7, CAN_DEBUGOUT14),
1092 PINMUX_IPSR_MODSEL_DATA(IP7_9_7, IRQ1_B, SEL_INT1_1), 1091 PINMUX_IPSR_MSEL(IP7_9_7, IRQ1_B, SEL_INT1_1),
1093 PINMUX_IPSR_MODSEL_DATA(IP7_9_7, SSI_WS9_B, SEL_SSI9_1), 1092 PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS9_B, SEL_SSI9_1),
1094 PINMUX_IPSR_MODSEL_DATA(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2), 1093 PINMUX_IPSR_MSEL(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2),
1095 PINMUX_IPSR_MODSEL_DATA(IP7_12_10, SSI_SDATA7, SEL_SSI7_0), 1094 PINMUX_IPSR_MSEL(IP7_12_10, SSI_SDATA7, SEL_SSI7_0),
1096 PINMUX_IPSR_DATA(IP7_12_10, CAN_DEBUGOUT15), 1095 PINMUX_IPSR_DATA(IP7_12_10, CAN_DEBUGOUT15),
1097 PINMUX_IPSR_MODSEL_DATA(IP7_12_10, IRQ2_B, SEL_INT2_1), 1096 PINMUX_IPSR_MSEL(IP7_12_10, IRQ2_B, SEL_INT2_1),
1098 PINMUX_IPSR_MODSEL_DATA(IP7_12_10, TCLK1_C, SEL_TMU1_2), 1097 PINMUX_IPSR_MSEL(IP7_12_10, TCLK1_C, SEL_TMU1_2),
1099 PINMUX_IPSR_DATA(IP7_12_10, HSPI_TX1_C), 1098 PINMUX_IPSR_DATA(IP7_12_10, HSPI_TX1_C),
1100 PINMUX_IPSR_MODSEL_DATA(IP7_14_13, SSI_SDATA8, SEL_SSI8_0), 1099 PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA8, SEL_SSI8_0),
1101 PINMUX_IPSR_DATA(IP7_14_13, VSP), 1100 PINMUX_IPSR_DATA(IP7_14_13, VSP),
1102 PINMUX_IPSR_MODSEL_DATA(IP7_14_13, IRQ3_B, SEL_INT3_1), 1101 PINMUX_IPSR_MSEL(IP7_14_13, IRQ3_B, SEL_INT3_1),
1103 PINMUX_IPSR_MODSEL_DATA(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2), 1102 PINMUX_IPSR_MSEL(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2),
1104 PINMUX_IPSR_DATA(IP7_16_15, SD0_CLK), 1103 PINMUX_IPSR_DATA(IP7_16_15, SD0_CLK),
1105 PINMUX_IPSR_DATA(IP7_16_15, ATACS01), 1104 PINMUX_IPSR_DATA(IP7_16_15, ATACS01),
1106 PINMUX_IPSR_MODSEL_DATA(IP7_16_15, SCK1_B, SEL_SCIF1_1), 1105 PINMUX_IPSR_MSEL(IP7_16_15, SCK1_B, SEL_SCIF1_1),
1107 PINMUX_IPSR_DATA(IP7_18_17, SD0_CMD), 1106 PINMUX_IPSR_DATA(IP7_18_17, SD0_CMD),
1108 PINMUX_IPSR_DATA(IP7_18_17, ATACS11), 1107 PINMUX_IPSR_DATA(IP7_18_17, ATACS11),
1109 PINMUX_IPSR_DATA(IP7_18_17, TX1_B), 1108 PINMUX_IPSR_DATA(IP7_18_17, TX1_B),
1110 PINMUX_IPSR_DATA(IP7_18_17, CC5_TDO), 1109 PINMUX_IPSR_DATA(IP7_18_17, CC5_TDO),
1111 PINMUX_IPSR_DATA(IP7_20_19, SD0_DAT0), 1110 PINMUX_IPSR_DATA(IP7_20_19, SD0_DAT0),
1112 PINMUX_IPSR_DATA(IP7_20_19, ATADIR1), 1111 PINMUX_IPSR_DATA(IP7_20_19, ATADIR1),
1113 PINMUX_IPSR_MODSEL_DATA(IP7_20_19, RX1_B, SEL_SCIF1_1), 1112 PINMUX_IPSR_MSEL(IP7_20_19, RX1_B, SEL_SCIF1_1),
1114 PINMUX_IPSR_DATA(IP7_20_19, CC5_TRST), 1113 PINMUX_IPSR_DATA(IP7_20_19, CC5_TRST),
1115 PINMUX_IPSR_DATA(IP7_22_21, SD0_DAT1), 1114 PINMUX_IPSR_DATA(IP7_22_21, SD0_DAT1),
1116 PINMUX_IPSR_DATA(IP7_22_21, ATAG1), 1115 PINMUX_IPSR_DATA(IP7_22_21, ATAG1),
1117 PINMUX_IPSR_MODSEL_DATA(IP7_22_21, SCK2_B, SEL_SCIF2_1), 1116 PINMUX_IPSR_MSEL(IP7_22_21, SCK2_B, SEL_SCIF2_1),
1118 PINMUX_IPSR_DATA(IP7_22_21, CC5_TMS), 1117 PINMUX_IPSR_DATA(IP7_22_21, CC5_TMS),
1119 PINMUX_IPSR_DATA(IP7_24_23, SD0_DAT2), 1118 PINMUX_IPSR_DATA(IP7_24_23, SD0_DAT2),
1120 PINMUX_IPSR_DATA(IP7_24_23, ATARD1), 1119 PINMUX_IPSR_DATA(IP7_24_23, ATARD1),
@@ -1122,17 +1121,17 @@ static const u16 pinmux_data[] = {
1122 PINMUX_IPSR_DATA(IP7_24_23, CC5_TCK), 1121 PINMUX_IPSR_DATA(IP7_24_23, CC5_TCK),
1123 PINMUX_IPSR_DATA(IP7_26_25, SD0_DAT3), 1122 PINMUX_IPSR_DATA(IP7_26_25, SD0_DAT3),
1124 PINMUX_IPSR_DATA(IP7_26_25, ATAWR1), 1123 PINMUX_IPSR_DATA(IP7_26_25, ATAWR1),
1125 PINMUX_IPSR_MODSEL_DATA(IP7_26_25, RX2_B, SEL_SCIF2_1), 1124 PINMUX_IPSR_MSEL(IP7_26_25, RX2_B, SEL_SCIF2_1),
1126 PINMUX_IPSR_DATA(IP7_26_25, CC5_TDI), 1125 PINMUX_IPSR_DATA(IP7_26_25, CC5_TDI),
1127 PINMUX_IPSR_DATA(IP7_28_27, SD0_CD), 1126 PINMUX_IPSR_DATA(IP7_28_27, SD0_CD),
1128 PINMUX_IPSR_MODSEL_DATA(IP7_28_27, DREQ2, SEL_EXBUS2_0), 1127 PINMUX_IPSR_MSEL(IP7_28_27, DREQ2, SEL_EXBUS2_0),
1129 PINMUX_IPSR_MODSEL_DATA(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1), 1128 PINMUX_IPSR_MSEL(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1),
1130 PINMUX_IPSR_DATA(IP7_30_29, SD0_WP), 1129 PINMUX_IPSR_DATA(IP7_30_29, SD0_WP),
1131 PINMUX_IPSR_DATA(IP7_30_29, DACK2), 1130 PINMUX_IPSR_DATA(IP7_30_29, DACK2),
1132 PINMUX_IPSR_MODSEL_DATA(IP7_30_29, CTS1_B, SEL_SCIF1_1), 1131 PINMUX_IPSR_MSEL(IP7_30_29, CTS1_B, SEL_SCIF1_1),
1133 1132
1134 PINMUX_IPSR_DATA(IP8_3_0, HSPI_CLK0), 1133 PINMUX_IPSR_DATA(IP8_3_0, HSPI_CLK0),
1135 PINMUX_IPSR_MODSEL_DATA(IP8_3_0, CTS0, SEL_SCIF0_0), 1134 PINMUX_IPSR_MSEL(IP8_3_0, CTS0, SEL_SCIF0_0),
1136 PINMUX_IPSR_DATA(IP8_3_0, USB_OVC0), 1135 PINMUX_IPSR_DATA(IP8_3_0, USB_OVC0),
1137 PINMUX_IPSR_DATA(IP8_3_0, AD_CLK), 1136 PINMUX_IPSR_DATA(IP8_3_0, AD_CLK),
1138 PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE4), 1137 PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE4),
@@ -1141,7 +1140,7 @@ static const u16 pinmux_data[] = {
1141 PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE28), 1140 PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE28),
1142 PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE36), 1141 PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE36),
1143 PINMUX_IPSR_DATA(IP8_7_4, HSPI_CS0), 1142 PINMUX_IPSR_DATA(IP8_7_4, HSPI_CS0),
1144 PINMUX_IPSR_MODSEL_DATA(IP8_7_4, RTS0_TANS, SEL_SCIF0_0), 1143 PINMUX_IPSR_MSEL(IP8_7_4, RTS0_TANS, SEL_SCIF0_0),
1145 PINMUX_IPSR_DATA(IP8_7_4, USB_OVC1), 1144 PINMUX_IPSR_DATA(IP8_7_4, USB_OVC1),
1146 PINMUX_IPSR_DATA(IP8_7_4, AD_DI), 1145 PINMUX_IPSR_DATA(IP8_7_4, AD_DI),
1147 PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE5), 1146 PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE5),
@@ -1159,7 +1158,7 @@ static const u16 pinmux_data[] = {
1159 PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE30), 1158 PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE30),
1160 PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE38), 1159 PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE38),
1161 PINMUX_IPSR_DATA(IP8_15_12, HSPI_RX0), 1160 PINMUX_IPSR_DATA(IP8_15_12, HSPI_RX0),
1162 PINMUX_IPSR_MODSEL_DATA(IP8_15_12, RX0, SEL_SCIF0_0), 1161 PINMUX_IPSR_MSEL(IP8_15_12, RX0, SEL_SCIF0_0),
1163 PINMUX_IPSR_DATA(IP8_15_12, CAN_STEP0), 1162 PINMUX_IPSR_DATA(IP8_15_12, CAN_STEP0),
1164 PINMUX_IPSR_DATA(IP8_15_12, AD_NCS), 1163 PINMUX_IPSR_DATA(IP8_15_12, AD_NCS),
1165 PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE7), 1164 PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE7),
@@ -1181,25 +1180,25 @@ static const u16 pinmux_data[] = {
1181 PINMUX_IPSR_DATA(IP8_22_21, HTX1_B), 1180 PINMUX_IPSR_DATA(IP8_22_21, HTX1_B),
1182 PINMUX_IPSR_DATA(IP8_22_21, MT1_SYNC), 1181 PINMUX_IPSR_DATA(IP8_22_21, MT1_SYNC),
1183 PINMUX_IPSR_DATA(IP8_24_23, VI0_FIELD), 1182 PINMUX_IPSR_DATA(IP8_24_23, VI0_FIELD),
1184 PINMUX_IPSR_MODSEL_DATA(IP8_24_23, RX1_C, SEL_SCIF1_2), 1183 PINMUX_IPSR_MSEL(IP8_24_23, RX1_C, SEL_SCIF1_2),
1185 PINMUX_IPSR_MODSEL_DATA(IP8_24_23, HRX1_B, SEL_HSCIF1_1), 1184 PINMUX_IPSR_MSEL(IP8_24_23, HRX1_B, SEL_HSCIF1_1),
1186 PINMUX_IPSR_DATA(IP8_27_25, VI0_HSYNC), 1185 PINMUX_IPSR_DATA(IP8_27_25, VI0_HSYNC),
1187 PINMUX_IPSR_MODSEL_DATA(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1), 1186 PINMUX_IPSR_MSEL(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1),
1188 PINMUX_IPSR_MODSEL_DATA(IP8_27_25, CTS1_C, SEL_SCIF1_2), 1187 PINMUX_IPSR_MSEL(IP8_27_25, CTS1_C, SEL_SCIF1_2),
1189 PINMUX_IPSR_DATA(IP8_27_25, TX4_D), 1188 PINMUX_IPSR_DATA(IP8_27_25, TX4_D),
1190 PINMUX_IPSR_DATA(IP8_27_25, MMC1_CMD), 1189 PINMUX_IPSR_DATA(IP8_27_25, MMC1_CMD),
1191 PINMUX_IPSR_MODSEL_DATA(IP8_27_25, HSCK1_B, SEL_HSCIF1_1), 1190 PINMUX_IPSR_MSEL(IP8_27_25, HSCK1_B, SEL_HSCIF1_1),
1192 PINMUX_IPSR_DATA(IP8_30_28, VI0_VSYNC), 1191 PINMUX_IPSR_DATA(IP8_30_28, VI0_VSYNC),
1193 PINMUX_IPSR_MODSEL_DATA(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1), 1192 PINMUX_IPSR_MSEL(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1),
1194 PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2), 1193 PINMUX_IPSR_MSEL(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2),
1195 PINMUX_IPSR_MODSEL_DATA(IP8_30_28, RX4_D, SEL_SCIF4_3), 1194 PINMUX_IPSR_MSEL(IP8_30_28, RX4_D, SEL_SCIF4_3),
1196 PINMUX_IPSR_MODSEL_DATA(IP8_30_28, PWMFSW0_C, SEL_PWMFSW_2), 1195 PINMUX_IPSR_MSEL(IP8_30_28, PWMFSW0_C, SEL_PWMFSW_2),
1197 1196
1198 PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0), 1197 PINMUX_IPSR_MSEL(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0),
1199 PINMUX_IPSR_MODSEL_DATA(IP9_1_0, HRTS1_B, SEL_HSCIF1_1), 1198 PINMUX_IPSR_MSEL(IP9_1_0, HRTS1_B, SEL_HSCIF1_1),
1200 PINMUX_IPSR_DATA(IP9_1_0, MT1_VCXO), 1199 PINMUX_IPSR_DATA(IP9_1_0, MT1_VCXO),
1201 PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0), 1200 PINMUX_IPSR_MSEL(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0),
1202 PINMUX_IPSR_MODSEL_DATA(IP9_3_2, HCTS1_B, SEL_HSCIF1_1), 1201 PINMUX_IPSR_MSEL(IP9_3_2, HCTS1_B, SEL_HSCIF1_1),
1203 PINMUX_IPSR_DATA(IP9_3_2, MT1_PWM), 1202 PINMUX_IPSR_DATA(IP9_3_2, MT1_PWM),
1204 PINMUX_IPSR_DATA(IP9_4, VI0_DATA2_VI0_B2), 1203 PINMUX_IPSR_DATA(IP9_4, VI0_DATA2_VI0_B2),
1205 PINMUX_IPSR_DATA(IP9_4, MMC1_D0), 1204 PINMUX_IPSR_DATA(IP9_4, MMC1_D0),
@@ -1216,12 +1215,12 @@ static const u16 pinmux_data[] = {
1216 PINMUX_IPSR_DATA(IP9_11_10, MMC1_D5), 1215 PINMUX_IPSR_DATA(IP9_11_10, MMC1_D5),
1217 PINMUX_IPSR_DATA(IP9_11_10, ARM_TRACEDATA_1), 1216 PINMUX_IPSR_DATA(IP9_11_10, ARM_TRACEDATA_1),
1218 PINMUX_IPSR_DATA(IP9_13_12, VI0_G0), 1217 PINMUX_IPSR_DATA(IP9_13_12, VI0_G0),
1219 PINMUX_IPSR_MODSEL_DATA(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2), 1218 PINMUX_IPSR_MSEL(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2),
1220 PINMUX_IPSR_MODSEL_DATA(IP9_13_12, IRQ0, SEL_INT0_0), 1219 PINMUX_IPSR_MSEL(IP9_13_12, IRQ0, SEL_INT0_0),
1221 PINMUX_IPSR_DATA(IP9_13_12, ARM_TRACEDATA_2), 1220 PINMUX_IPSR_DATA(IP9_13_12, ARM_TRACEDATA_2),
1222 PINMUX_IPSR_DATA(IP9_15_14, VI0_G1), 1221 PINMUX_IPSR_DATA(IP9_15_14, VI0_G1),
1223 PINMUX_IPSR_MODSEL_DATA(IP9_15_14, SSI_WS78_C, SEL_SSI7_2), 1222 PINMUX_IPSR_MSEL(IP9_15_14, SSI_WS78_C, SEL_SSI7_2),
1224 PINMUX_IPSR_MODSEL_DATA(IP9_15_14, IRQ1, SEL_INT1_0), 1223 PINMUX_IPSR_MSEL(IP9_15_14, IRQ1, SEL_INT1_0),
1225 PINMUX_IPSR_DATA(IP9_15_14, ARM_TRACEDATA_3), 1224 PINMUX_IPSR_DATA(IP9_15_14, ARM_TRACEDATA_3),
1226 PINMUX_IPSR_DATA(IP9_18_16, VI0_G2), 1225 PINMUX_IPSR_DATA(IP9_18_16, VI0_G2),
1227 PINMUX_IPSR_DATA(IP9_18_16, ETH_TXD1), 1226 PINMUX_IPSR_DATA(IP9_18_16, ETH_TXD1),
@@ -1235,29 +1234,29 @@ static const u16 pinmux_data[] = {
1235 PINMUX_IPSR_DATA(IP9_21_19, TS_SDAT0), 1234 PINMUX_IPSR_DATA(IP9_21_19, TS_SDAT0),
1236 PINMUX_IPSR_DATA(IP9_23_22, VI0_G4), 1235 PINMUX_IPSR_DATA(IP9_23_22, VI0_G4),
1237 PINMUX_IPSR_DATA(IP9_23_22, ETH_TX_EN), 1236 PINMUX_IPSR_DATA(IP9_23_22, ETH_TX_EN),
1238 PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SD2_DAT0_B, SEL_SD2_1), 1237 PINMUX_IPSR_MSEL(IP9_23_22, SD2_DAT0_B, SEL_SD2_1),
1239 PINMUX_IPSR_DATA(IP9_23_22, ARM_TRACEDATA_6), 1238 PINMUX_IPSR_DATA(IP9_23_22, ARM_TRACEDATA_6),
1240 PINMUX_IPSR_DATA(IP9_25_24, VI0_G5), 1239 PINMUX_IPSR_DATA(IP9_25_24, VI0_G5),
1241 PINMUX_IPSR_DATA(IP9_25_24, ETH_RX_ER), 1240 PINMUX_IPSR_DATA(IP9_25_24, ETH_RX_ER),
1242 PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SD2_DAT1_B, SEL_SD2_1), 1241 PINMUX_IPSR_MSEL(IP9_25_24, SD2_DAT1_B, SEL_SD2_1),
1243 PINMUX_IPSR_DATA(IP9_25_24, ARM_TRACEDATA_7), 1242 PINMUX_IPSR_DATA(IP9_25_24, ARM_TRACEDATA_7),
1244 PINMUX_IPSR_DATA(IP9_27_26, VI0_G6), 1243 PINMUX_IPSR_DATA(IP9_27_26, VI0_G6),
1245 PINMUX_IPSR_DATA(IP9_27_26, ETH_RXD0), 1244 PINMUX_IPSR_DATA(IP9_27_26, ETH_RXD0),
1246 PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SD2_DAT2_B, SEL_SD2_1), 1245 PINMUX_IPSR_MSEL(IP9_27_26, SD2_DAT2_B, SEL_SD2_1),
1247 PINMUX_IPSR_DATA(IP9_27_26, ARM_TRACEDATA_8), 1246 PINMUX_IPSR_DATA(IP9_27_26, ARM_TRACEDATA_8),
1248 PINMUX_IPSR_DATA(IP9_29_28, VI0_G7), 1247 PINMUX_IPSR_DATA(IP9_29_28, VI0_G7),
1249 PINMUX_IPSR_DATA(IP9_29_28, ETH_RXD1), 1248 PINMUX_IPSR_DATA(IP9_29_28, ETH_RXD1),
1250 PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SD2_DAT3_B, SEL_SD2_1), 1249 PINMUX_IPSR_MSEL(IP9_29_28, SD2_DAT3_B, SEL_SD2_1),
1251 PINMUX_IPSR_DATA(IP9_29_28, ARM_TRACEDATA_9), 1250 PINMUX_IPSR_DATA(IP9_29_28, ARM_TRACEDATA_9),
1252 1251
1253 PINMUX_IPSR_DATA(IP10_2_0, VI0_R0), 1252 PINMUX_IPSR_DATA(IP10_2_0, VI0_R0),
1254 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2), 1253 PINMUX_IPSR_MSEL(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2),
1255 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCK1_C, SEL_SCIF1_2), 1254 PINMUX_IPSR_MSEL(IP10_2_0, SCK1_C, SEL_SCIF1_2),
1256 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ1_B, SEL_EXBUS1_0), 1255 PINMUX_IPSR_MSEL(IP10_2_0, DREQ1_B, SEL_EXBUS1_0),
1257 PINMUX_IPSR_DATA(IP10_2_0, ARM_TRACEDATA_10), 1256 PINMUX_IPSR_DATA(IP10_2_0, ARM_TRACEDATA_10),
1258 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, DREQ0_C, SEL_EXBUS0_2), 1257 PINMUX_IPSR_MSEL(IP10_2_0, DREQ0_C, SEL_EXBUS0_2),
1259 PINMUX_IPSR_DATA(IP10_5_3, VI0_R1), 1258 PINMUX_IPSR_DATA(IP10_5_3, VI0_R1),
1260 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2), 1259 PINMUX_IPSR_MSEL(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2),
1261 PINMUX_IPSR_DATA(IP10_5_3, DACK1_B), 1260 PINMUX_IPSR_DATA(IP10_5_3, DACK1_B),
1262 PINMUX_IPSR_DATA(IP10_5_3, ARM_TRACEDATA_11), 1261 PINMUX_IPSR_DATA(IP10_5_3, ARM_TRACEDATA_11),
1263 PINMUX_IPSR_DATA(IP10_5_3, DACK0_C), 1262 PINMUX_IPSR_DATA(IP10_5_3, DACK0_C),
@@ -1265,74 +1264,74 @@ static const u16 pinmux_data[] = {
1265 PINMUX_IPSR_DATA(IP10_8_6, VI0_R2), 1264 PINMUX_IPSR_DATA(IP10_8_6, VI0_R2),
1266 PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK), 1265 PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK),
1267 PINMUX_IPSR_DATA(IP10_8_6, SD2_CLK_B), 1266 PINMUX_IPSR_DATA(IP10_8_6, SD2_CLK_B),
1268 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, IRQ2, SEL_INT2_0), 1267 PINMUX_IPSR_MSEL(IP10_8_6, IRQ2, SEL_INT2_0),
1269 PINMUX_IPSR_DATA(IP10_8_6, ARM_TRACEDATA_12), 1268 PINMUX_IPSR_DATA(IP10_8_6, ARM_TRACEDATA_12),
1270 PINMUX_IPSR_DATA(IP10_11_9, VI0_R3), 1269 PINMUX_IPSR_DATA(IP10_11_9, VI0_R3),
1271 PINMUX_IPSR_DATA(IP10_11_9, ETH_MAGIC), 1270 PINMUX_IPSR_DATA(IP10_11_9, ETH_MAGIC),
1272 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SD2_CMD_B, SEL_SD2_1), 1271 PINMUX_IPSR_MSEL(IP10_11_9, SD2_CMD_B, SEL_SD2_1),
1273 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, IRQ3, SEL_INT3_0), 1272 PINMUX_IPSR_MSEL(IP10_11_9, IRQ3, SEL_INT3_0),
1274 PINMUX_IPSR_DATA(IP10_11_9, ARM_TRACEDATA_13), 1273 PINMUX_IPSR_DATA(IP10_11_9, ARM_TRACEDATA_13),
1275 PINMUX_IPSR_DATA(IP10_14_12, VI0_R4), 1274 PINMUX_IPSR_DATA(IP10_14_12, VI0_R4),
1276 PINMUX_IPSR_DATA(IP10_14_12, ETH_REFCLK), 1275 PINMUX_IPSR_DATA(IP10_14_12, ETH_REFCLK),
1277 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SD2_CD_B, SEL_SD2_1), 1276 PINMUX_IPSR_MSEL(IP10_14_12, SD2_CD_B, SEL_SD2_1),
1278 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1), 1277 PINMUX_IPSR_MSEL(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1),
1279 PINMUX_IPSR_DATA(IP10_14_12, ARM_TRACEDATA_14), 1278 PINMUX_IPSR_DATA(IP10_14_12, ARM_TRACEDATA_14),
1280 PINMUX_IPSR_DATA(IP10_14_12, MT1_CLK), 1279 PINMUX_IPSR_DATA(IP10_14_12, MT1_CLK),
1281 PINMUX_IPSR_DATA(IP10_14_12, TS_SCK0), 1280 PINMUX_IPSR_DATA(IP10_14_12, TS_SCK0),
1282 PINMUX_IPSR_DATA(IP10_17_15, VI0_R5), 1281 PINMUX_IPSR_DATA(IP10_17_15, VI0_R5),
1283 PINMUX_IPSR_DATA(IP10_17_15, ETH_TXD0), 1282 PINMUX_IPSR_DATA(IP10_17_15, ETH_TXD0),
1284 PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SD2_WP_B, SEL_SD2_1), 1283 PINMUX_IPSR_MSEL(IP10_17_15, SD2_WP_B, SEL_SD2_1),
1285 PINMUX_IPSR_MODSEL_DATA(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1), 1284 PINMUX_IPSR_MSEL(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1),
1286 PINMUX_IPSR_DATA(IP10_17_15, ARM_TRACEDATA_15), 1285 PINMUX_IPSR_DATA(IP10_17_15, ARM_TRACEDATA_15),
1287 PINMUX_IPSR_DATA(IP10_17_15, MT1_D), 1286 PINMUX_IPSR_DATA(IP10_17_15, MT1_D),
1288 PINMUX_IPSR_DATA(IP10_17_15, TS_SDEN0), 1287 PINMUX_IPSR_DATA(IP10_17_15, TS_SDEN0),
1289 PINMUX_IPSR_DATA(IP10_20_18, VI0_R6), 1288 PINMUX_IPSR_DATA(IP10_20_18, VI0_R6),
1290 PINMUX_IPSR_DATA(IP10_20_18, ETH_MDC), 1289 PINMUX_IPSR_DATA(IP10_20_18, ETH_MDC),
1291 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, DREQ2_C, SEL_EXBUS2_2), 1290 PINMUX_IPSR_MSEL(IP10_20_18, DREQ2_C, SEL_EXBUS2_2),
1292 PINMUX_IPSR_DATA(IP10_20_18, HSPI_TX1_B), 1291 PINMUX_IPSR_DATA(IP10_20_18, HSPI_TX1_B),
1293 PINMUX_IPSR_DATA(IP10_20_18, TRACECLK), 1292 PINMUX_IPSR_DATA(IP10_20_18, TRACECLK),
1294 PINMUX_IPSR_DATA(IP10_20_18, MT1_BEN), 1293 PINMUX_IPSR_DATA(IP10_20_18, MT1_BEN),
1295 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3), 1294 PINMUX_IPSR_MSEL(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3),
1296 PINMUX_IPSR_DATA(IP10_23_21, VI0_R7), 1295 PINMUX_IPSR_DATA(IP10_23_21, VI0_R7),
1297 PINMUX_IPSR_DATA(IP10_23_21, ETH_MDIO), 1296 PINMUX_IPSR_DATA(IP10_23_21, ETH_MDIO),
1298 PINMUX_IPSR_DATA(IP10_23_21, DACK2_C), 1297 PINMUX_IPSR_DATA(IP10_23_21, DACK2_C),
1299 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1), 1298 PINMUX_IPSR_MSEL(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1),
1300 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3), 1299 PINMUX_IPSR_MSEL(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3),
1301 PINMUX_IPSR_DATA(IP10_23_21, TRACECTL), 1300 PINMUX_IPSR_DATA(IP10_23_21, TRACECTL),
1302 PINMUX_IPSR_DATA(IP10_23_21, MT1_PEN), 1301 PINMUX_IPSR_DATA(IP10_23_21, MT1_PEN),
1303 PINMUX_IPSR_DATA(IP10_25_24, VI1_CLK), 1302 PINMUX_IPSR_DATA(IP10_25_24, VI1_CLK),
1304 PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SIM_D, SEL_SIM_0), 1303 PINMUX_IPSR_MSEL(IP10_25_24, SIM_D, SEL_SIM_0),
1305 PINMUX_IPSR_MODSEL_DATA(IP10_25_24, SDA3, SEL_I2C3_0), 1304 PINMUX_IPSR_MSEL(IP10_25_24, SDA3, SEL_I2C3_0),
1306 PINMUX_IPSR_DATA(IP10_28_26, VI1_HSYNC), 1305 PINMUX_IPSR_DATA(IP10_28_26, VI1_HSYNC),
1307 PINMUX_IPSR_DATA(IP10_28_26, VI3_CLK), 1306 PINMUX_IPSR_DATA(IP10_28_26, VI3_CLK),
1308 PINMUX_IPSR_DATA(IP10_28_26, SSI_SCK4), 1307 PINMUX_IPSR_DATA(IP10_28_26, SSI_SCK4),
1309 PINMUX_IPSR_MODSEL_DATA(IP10_28_26, GPS_SIGN_C, SEL_GPS_2), 1308 PINMUX_IPSR_MSEL(IP10_28_26, GPS_SIGN_C, SEL_GPS_2),
1310 PINMUX_IPSR_MODSEL_DATA(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4), 1309 PINMUX_IPSR_MSEL(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4),
1311 PINMUX_IPSR_DATA(IP10_31_29, VI1_VSYNC), 1310 PINMUX_IPSR_DATA(IP10_31_29, VI1_VSYNC),
1312 PINMUX_IPSR_DATA(IP10_31_29, AUDIO_CLKOUT_C), 1311 PINMUX_IPSR_DATA(IP10_31_29, AUDIO_CLKOUT_C),
1313 PINMUX_IPSR_DATA(IP10_31_29, SSI_WS4), 1312 PINMUX_IPSR_DATA(IP10_31_29, SSI_WS4),
1314 PINMUX_IPSR_DATA(IP10_31_29, SIM_CLK), 1313 PINMUX_IPSR_DATA(IP10_31_29, SIM_CLK),
1315 PINMUX_IPSR_MODSEL_DATA(IP10_31_29, GPS_MAG_C, SEL_GPS_2), 1314 PINMUX_IPSR_MSEL(IP10_31_29, GPS_MAG_C, SEL_GPS_2),
1316 PINMUX_IPSR_DATA(IP10_31_29, SPV_TRST), 1315 PINMUX_IPSR_DATA(IP10_31_29, SPV_TRST),
1317 PINMUX_IPSR_MODSEL_DATA(IP10_31_29, SCL3, SEL_I2C3_0), 1316 PINMUX_IPSR_MSEL(IP10_31_29, SCL3, SEL_I2C3_0),
1318 1317
1319 PINMUX_IPSR_DATA(IP11_2_0, VI1_DATA0_VI1_B0), 1318 PINMUX_IPSR_DATA(IP11_2_0, VI1_DATA0_VI1_B0),
1320 PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SD2_DAT0, SEL_SD2_0), 1319 PINMUX_IPSR_MSEL(IP11_2_0, SD2_DAT0, SEL_SD2_0),
1321 PINMUX_IPSR_DATA(IP11_2_0, SIM_RST), 1320 PINMUX_IPSR_DATA(IP11_2_0, SIM_RST),
1322 PINMUX_IPSR_DATA(IP11_2_0, SPV_TCK), 1321 PINMUX_IPSR_DATA(IP11_2_0, SPV_TCK),
1323 PINMUX_IPSR_DATA(IP11_2_0, ADICLK_B), 1322 PINMUX_IPSR_DATA(IP11_2_0, ADICLK_B),
1324 PINMUX_IPSR_DATA(IP11_5_3, VI1_DATA1_VI1_B1), 1323 PINMUX_IPSR_DATA(IP11_5_3, VI1_DATA1_VI1_B1),
1325 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SD2_DAT1, SEL_SD2_0), 1324 PINMUX_IPSR_MSEL(IP11_5_3, SD2_DAT1, SEL_SD2_0),
1326 PINMUX_IPSR_DATA(IP11_5_3, MT0_CLK), 1325 PINMUX_IPSR_DATA(IP11_5_3, MT0_CLK),
1327 PINMUX_IPSR_DATA(IP11_5_3, SPV_TMS), 1326 PINMUX_IPSR_DATA(IP11_5_3, SPV_TMS),
1328 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1), 1327 PINMUX_IPSR_MSEL(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1),
1329 PINMUX_IPSR_DATA(IP11_8_6, VI1_DATA2_VI1_B2), 1328 PINMUX_IPSR_DATA(IP11_8_6, VI1_DATA2_VI1_B2),
1330 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SD2_DAT2, SEL_SD2_0), 1329 PINMUX_IPSR_MSEL(IP11_8_6, SD2_DAT2, SEL_SD2_0),
1331 PINMUX_IPSR_DATA(IP11_8_6, MT0_D), 1330 PINMUX_IPSR_DATA(IP11_8_6, MT0_D),
1332 PINMUX_IPSR_DATA(IP11_8_6, SPVTDI), 1331 PINMUX_IPSR_DATA(IP11_8_6, SPVTDI),
1333 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, ADIDATA_B, SEL_ADI_1), 1332 PINMUX_IPSR_MSEL(IP11_8_6, ADIDATA_B, SEL_ADI_1),
1334 PINMUX_IPSR_DATA(IP11_11_9, VI1_DATA3_VI1_B3), 1333 PINMUX_IPSR_DATA(IP11_11_9, VI1_DATA3_VI1_B3),
1335 PINMUX_IPSR_MODSEL_DATA(IP11_11_9, SD2_DAT3, SEL_SD2_0), 1334 PINMUX_IPSR_MSEL(IP11_11_9, SD2_DAT3, SEL_SD2_0),
1336 PINMUX_IPSR_DATA(IP11_11_9, MT0_BEN), 1335 PINMUX_IPSR_DATA(IP11_11_9, MT0_BEN),
1337 PINMUX_IPSR_DATA(IP11_11_9, SPV_TDO), 1336 PINMUX_IPSR_DATA(IP11_11_9, SPV_TDO),
1338 PINMUX_IPSR_DATA(IP11_11_9, ADICHS0_B), 1337 PINMUX_IPSR_DATA(IP11_11_9, ADICHS0_B),
@@ -1340,74 +1339,74 @@ static const u16 pinmux_data[] = {
1340 PINMUX_IPSR_DATA(IP11_14_12, SD2_CLK), 1339 PINMUX_IPSR_DATA(IP11_14_12, SD2_CLK),
1341 PINMUX_IPSR_DATA(IP11_14_12, MT0_PEN), 1340 PINMUX_IPSR_DATA(IP11_14_12, MT0_PEN),
1342 PINMUX_IPSR_DATA(IP11_14_12, SPA_TRST), 1341 PINMUX_IPSR_DATA(IP11_14_12, SPA_TRST),
1343 PINMUX_IPSR_MODSEL_DATA(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3), 1342 PINMUX_IPSR_MSEL(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3),
1344 PINMUX_IPSR_DATA(IP11_14_12, ADICHS1_B), 1343 PINMUX_IPSR_DATA(IP11_14_12, ADICHS1_B),
1345 PINMUX_IPSR_DATA(IP11_17_15, VI1_DATA5_VI1_B5), 1344 PINMUX_IPSR_DATA(IP11_17_15, VI1_DATA5_VI1_B5),
1346 PINMUX_IPSR_MODSEL_DATA(IP11_17_15, SD2_CMD, SEL_SD2_0), 1345 PINMUX_IPSR_MSEL(IP11_17_15, SD2_CMD, SEL_SD2_0),
1347 PINMUX_IPSR_DATA(IP11_17_15, MT0_SYNC), 1346 PINMUX_IPSR_DATA(IP11_17_15, MT0_SYNC),
1348 PINMUX_IPSR_DATA(IP11_17_15, SPA_TCK), 1347 PINMUX_IPSR_DATA(IP11_17_15, SPA_TCK),
1349 PINMUX_IPSR_MODSEL_DATA(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3), 1348 PINMUX_IPSR_MSEL(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3),
1350 PINMUX_IPSR_DATA(IP11_17_15, ADICHS2_B), 1349 PINMUX_IPSR_DATA(IP11_17_15, ADICHS2_B),
1351 PINMUX_IPSR_DATA(IP11_20_18, VI1_DATA6_VI1_B6), 1350 PINMUX_IPSR_DATA(IP11_20_18, VI1_DATA6_VI1_B6),
1352 PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SD2_CD, SEL_SD2_0), 1351 PINMUX_IPSR_MSEL(IP11_20_18, SD2_CD, SEL_SD2_0),
1353 PINMUX_IPSR_DATA(IP11_20_18, MT0_VCXO), 1352 PINMUX_IPSR_DATA(IP11_20_18, MT0_VCXO),
1354 PINMUX_IPSR_DATA(IP11_20_18, SPA_TMS), 1353 PINMUX_IPSR_DATA(IP11_20_18, SPA_TMS),
1355 PINMUX_IPSR_DATA(IP11_20_18, HSPI_TX1_D), 1354 PINMUX_IPSR_DATA(IP11_20_18, HSPI_TX1_D),
1356 PINMUX_IPSR_DATA(IP11_23_21, VI1_DATA7_VI1_B7), 1355 PINMUX_IPSR_DATA(IP11_23_21, VI1_DATA7_VI1_B7),
1357 PINMUX_IPSR_MODSEL_DATA(IP11_23_21, SD2_WP, SEL_SD2_0), 1356 PINMUX_IPSR_MSEL(IP11_23_21, SD2_WP, SEL_SD2_0),
1358 PINMUX_IPSR_DATA(IP11_23_21, MT0_PWM), 1357 PINMUX_IPSR_DATA(IP11_23_21, MT0_PWM),
1359 PINMUX_IPSR_DATA(IP11_23_21, SPA_TDI), 1358 PINMUX_IPSR_DATA(IP11_23_21, SPA_TDI),
1360 PINMUX_IPSR_MODSEL_DATA(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3), 1359 PINMUX_IPSR_MSEL(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
1361 PINMUX_IPSR_DATA(IP11_26_24, VI1_G0), 1360 PINMUX_IPSR_DATA(IP11_26_24, VI1_G0),
1362 PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0), 1361 PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0),
1363 PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1), 1362 PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1),
1364 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, DREQ2_B, SEL_EXBUS2_1), 1363 PINMUX_IPSR_MSEL(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
1365 PINMUX_IPSR_DATA(IP11_26_24, TX2), 1364 PINMUX_IPSR_DATA(IP11_26_24, TX2),
1366 PINMUX_IPSR_DATA(IP11_26_24, SPA_TDO), 1365 PINMUX_IPSR_DATA(IP11_26_24, SPA_TDO),
1367 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, HCTS0_B, SEL_HSCIF0_1), 1366 PINMUX_IPSR_MSEL(IP11_26_24, HCTS0_B, SEL_HSCIF0_1),
1368 PINMUX_IPSR_DATA(IP11_29_27, VI1_G1), 1367 PINMUX_IPSR_DATA(IP11_29_27, VI1_G1),
1369 PINMUX_IPSR_DATA(IP11_29_27, VI3_DATA1), 1368 PINMUX_IPSR_DATA(IP11_29_27, VI3_DATA1),
1370 PINMUX_IPSR_DATA(IP11_29_27, SSI_SCK1), 1369 PINMUX_IPSR_DATA(IP11_29_27, SSI_SCK1),
1371 PINMUX_IPSR_DATA(IP11_29_27, TS_SDEN1), 1370 PINMUX_IPSR_DATA(IP11_29_27, TS_SDEN1),
1372 PINMUX_IPSR_DATA(IP11_29_27, DACK2_B), 1371 PINMUX_IPSR_DATA(IP11_29_27, DACK2_B),
1373 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, RX2, SEL_SCIF2_0), 1372 PINMUX_IPSR_MSEL(IP11_29_27, RX2, SEL_SCIF2_0),
1374 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, HRTS0_B, SEL_HSCIF0_1), 1373 PINMUX_IPSR_MSEL(IP11_29_27, HRTS0_B, SEL_HSCIF0_1),
1375 1374
1376 PINMUX_IPSR_DATA(IP12_2_0, VI1_G2), 1375 PINMUX_IPSR_DATA(IP12_2_0, VI1_G2),
1377 PINMUX_IPSR_DATA(IP12_2_0, VI3_DATA2), 1376 PINMUX_IPSR_DATA(IP12_2_0, VI3_DATA2),
1378 PINMUX_IPSR_DATA(IP12_2_0, SSI_WS1), 1377 PINMUX_IPSR_DATA(IP12_2_0, SSI_WS1),
1379 PINMUX_IPSR_DATA(IP12_2_0, TS_SPSYNC1), 1378 PINMUX_IPSR_DATA(IP12_2_0, TS_SPSYNC1),
1380 PINMUX_IPSR_MODSEL_DATA(IP12_2_0, SCK2, SEL_SCIF2_0), 1379 PINMUX_IPSR_MSEL(IP12_2_0, SCK2, SEL_SCIF2_0),
1381 PINMUX_IPSR_MODSEL_DATA(IP12_2_0, HSCK0_B, SEL_HSCIF0_1), 1380 PINMUX_IPSR_MSEL(IP12_2_0, HSCK0_B, SEL_HSCIF0_1),
1382 PINMUX_IPSR_DATA(IP12_5_3, VI1_G3), 1381 PINMUX_IPSR_DATA(IP12_5_3, VI1_G3),
1383 PINMUX_IPSR_DATA(IP12_5_3, VI3_DATA3), 1382 PINMUX_IPSR_DATA(IP12_5_3, VI3_DATA3),
1384 PINMUX_IPSR_DATA(IP12_5_3, SSI_SCK2), 1383 PINMUX_IPSR_DATA(IP12_5_3, SSI_SCK2),
1385 PINMUX_IPSR_DATA(IP12_5_3, TS_SDAT1), 1384 PINMUX_IPSR_DATA(IP12_5_3, TS_SDAT1),
1386 PINMUX_IPSR_MODSEL_DATA(IP12_5_3, SCL1_C, SEL_I2C1_2), 1385 PINMUX_IPSR_MSEL(IP12_5_3, SCL1_C, SEL_I2C1_2),
1387 PINMUX_IPSR_DATA(IP12_5_3, HTX0_B), 1386 PINMUX_IPSR_DATA(IP12_5_3, HTX0_B),
1388 PINMUX_IPSR_DATA(IP12_8_6, VI1_G4), 1387 PINMUX_IPSR_DATA(IP12_8_6, VI1_G4),
1389 PINMUX_IPSR_DATA(IP12_8_6, VI3_DATA4), 1388 PINMUX_IPSR_DATA(IP12_8_6, VI3_DATA4),
1390 PINMUX_IPSR_DATA(IP12_8_6, SSI_WS2), 1389 PINMUX_IPSR_DATA(IP12_8_6, SSI_WS2),
1391 PINMUX_IPSR_MODSEL_DATA(IP12_8_6, SDA1_C, SEL_I2C1_2), 1390 PINMUX_IPSR_MSEL(IP12_8_6, SDA1_C, SEL_I2C1_2),
1392 PINMUX_IPSR_DATA(IP12_8_6, SIM_RST_B), 1391 PINMUX_IPSR_DATA(IP12_8_6, SIM_RST_B),
1393 PINMUX_IPSR_MODSEL_DATA(IP12_8_6, HRX0_B, SEL_HSCIF0_1), 1392 PINMUX_IPSR_MSEL(IP12_8_6, HRX0_B, SEL_HSCIF0_1),
1394 PINMUX_IPSR_DATA(IP12_11_9, VI1_G5), 1393 PINMUX_IPSR_DATA(IP12_11_9, VI1_G5),
1395 PINMUX_IPSR_DATA(IP12_11_9, VI3_DATA5), 1394 PINMUX_IPSR_DATA(IP12_11_9, VI3_DATA5),
1396 PINMUX_IPSR_MODSEL_DATA(IP12_11_9, GPS_CLK, SEL_GPS_0), 1395 PINMUX_IPSR_MSEL(IP12_11_9, GPS_CLK, SEL_GPS_0),
1397 PINMUX_IPSR_DATA(IP12_11_9, FSE), 1396 PINMUX_IPSR_DATA(IP12_11_9, FSE),
1398 PINMUX_IPSR_DATA(IP12_11_9, TX4_B), 1397 PINMUX_IPSR_DATA(IP12_11_9, TX4_B),
1399 PINMUX_IPSR_MODSEL_DATA(IP12_11_9, SIM_D_B, SEL_SIM_1), 1398 PINMUX_IPSR_MSEL(IP12_11_9, SIM_D_B, SEL_SIM_1),
1400 PINMUX_IPSR_DATA(IP12_14_12, VI1_G6), 1399 PINMUX_IPSR_DATA(IP12_14_12, VI1_G6),
1401 PINMUX_IPSR_DATA(IP12_14_12, VI3_DATA6), 1400 PINMUX_IPSR_DATA(IP12_14_12, VI3_DATA6),
1402 PINMUX_IPSR_MODSEL_DATA(IP12_14_12, GPS_SIGN, SEL_GPS_0), 1401 PINMUX_IPSR_MSEL(IP12_14_12, GPS_SIGN, SEL_GPS_0),
1403 PINMUX_IPSR_DATA(IP12_14_12, FRB), 1402 PINMUX_IPSR_DATA(IP12_14_12, FRB),
1404 PINMUX_IPSR_MODSEL_DATA(IP12_14_12, RX4_B, SEL_SCIF4_1), 1403 PINMUX_IPSR_MSEL(IP12_14_12, RX4_B, SEL_SCIF4_1),
1405 PINMUX_IPSR_DATA(IP12_14_12, SIM_CLK_B), 1404 PINMUX_IPSR_DATA(IP12_14_12, SIM_CLK_B),
1406 PINMUX_IPSR_DATA(IP12_17_15, VI1_G7), 1405 PINMUX_IPSR_DATA(IP12_17_15, VI1_G7),
1407 PINMUX_IPSR_DATA(IP12_17_15, VI3_DATA7), 1406 PINMUX_IPSR_DATA(IP12_17_15, VI3_DATA7),
1408 PINMUX_IPSR_MODSEL_DATA(IP12_17_15, GPS_MAG, SEL_GPS_0), 1407 PINMUX_IPSR_MSEL(IP12_17_15, GPS_MAG, SEL_GPS_0),
1409 PINMUX_IPSR_DATA(IP12_17_15, FCE), 1408 PINMUX_IPSR_DATA(IP12_17_15, FCE),
1410 PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCK4_B, SEL_SCIF4_1), 1409 PINMUX_IPSR_MSEL(IP12_17_15, SCK4_B, SEL_SCIF4_1),
1411}; 1410};
1412 1411
1413static const struct sh_pfc_pin pinmux_pins[] = { 1412static const struct sh_pfc_pin pinmux_pins[] = {
@@ -3868,6 +3867,6 @@ const struct sh_pfc_soc_info r8a7779_pinmux_info = {
3868 3867
3869 .cfg_regs = pinmux_config_regs, 3868 .cfg_regs = pinmux_config_regs,
3870 3869
3871 .gpio_data = pinmux_data, 3870 .pinmux_data = pinmux_data,
3872 .gpio_data_size = ARRAY_SIZE(pinmux_data), 3871 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
3873}; 3872};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index fc344a7c2b53..d9924b0d53b7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/platform_data/gpio-rcar.h>
26 25
27#include "core.h" 26#include "core.h"
28#include "sh_pfc.h" 27#include "sh_pfc.h"
@@ -818,103 +817,103 @@ static const u16 pinmux_data[] = {
818 PINMUX_DATA(DU_DOTCLKIN2_MARK, FN_DU_DOTCLKIN2), 817 PINMUX_DATA(DU_DOTCLKIN2_MARK, FN_DU_DOTCLKIN2),
819 818
820 PINMUX_IPSR_DATA(IP0_2_0, D0), 819 PINMUX_IPSR_DATA(IP0_2_0, D0),
821 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, MSIOF3_SCK_B, SEL_SOF3_1), 820 PINMUX_IPSR_MSEL(IP0_2_0, MSIOF3_SCK_B, SEL_SOF3_1),
822 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, VI3_DATA0, SEL_VI3_0), 821 PINMUX_IPSR_MSEL(IP0_2_0, VI3_DATA0, SEL_VI3_0),
823 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, VI0_G4, SEL_VI0_0), 822 PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4, SEL_VI0_0),
824 PINMUX_IPSR_MODSEL_DATA(IP0_2_0, VI0_G4_B, SEL_VI0_1), 823 PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4_B, SEL_VI0_1),
825 PINMUX_IPSR_DATA(IP0_5_3, D1), 824 PINMUX_IPSR_DATA(IP0_5_3, D1),
826 PINMUX_IPSR_MODSEL_DATA(IP0_5_3, MSIOF3_SYNC_B, SEL_SOF3_1), 825 PINMUX_IPSR_MSEL(IP0_5_3, MSIOF3_SYNC_B, SEL_SOF3_1),
827 PINMUX_IPSR_MODSEL_DATA(IP0_5_3, VI3_DATA1, SEL_VI3_0), 826 PINMUX_IPSR_MSEL(IP0_5_3, VI3_DATA1, SEL_VI3_0),
828 PINMUX_IPSR_MODSEL_DATA(IP0_5_3, VI0_G5, SEL_VI0_0), 827 PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5, SEL_VI0_0),
829 PINMUX_IPSR_MODSEL_DATA(IP0_5_3, VI0_G5_B, SEL_VI0_1), 828 PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5_B, SEL_VI0_1),
830 PINMUX_IPSR_DATA(IP0_8_6, D2), 829 PINMUX_IPSR_DATA(IP0_8_6, D2),
831 PINMUX_IPSR_MODSEL_DATA(IP0_8_6, MSIOF3_RXD_B, SEL_SOF3_1), 830 PINMUX_IPSR_MSEL(IP0_8_6, MSIOF3_RXD_B, SEL_SOF3_1),
832 PINMUX_IPSR_MODSEL_DATA(IP0_8_6, VI3_DATA2, SEL_VI3_0), 831 PINMUX_IPSR_MSEL(IP0_8_6, VI3_DATA2, SEL_VI3_0),
833 PINMUX_IPSR_MODSEL_DATA(IP0_8_6, VI0_G6, SEL_VI0_0), 832 PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6, SEL_VI0_0),
834 PINMUX_IPSR_MODSEL_DATA(IP0_8_6, VI0_G6_B, SEL_VI0_1), 833 PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6_B, SEL_VI0_1),
835 PINMUX_IPSR_DATA(IP0_11_9, D3), 834 PINMUX_IPSR_DATA(IP0_11_9, D3),
836 PINMUX_IPSR_MODSEL_DATA(IP0_11_9, MSIOF3_TXD_B, SEL_SOF3_1), 835 PINMUX_IPSR_MSEL(IP0_11_9, MSIOF3_TXD_B, SEL_SOF3_1),
837 PINMUX_IPSR_MODSEL_DATA(IP0_11_9, VI3_DATA3, SEL_VI3_0), 836 PINMUX_IPSR_MSEL(IP0_11_9, VI3_DATA3, SEL_VI3_0),
838 PINMUX_IPSR_MODSEL_DATA(IP0_11_9, VI0_G7, SEL_VI0_0), 837 PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7, SEL_VI0_0),
839 PINMUX_IPSR_MODSEL_DATA(IP0_11_9, VI0_G7_B, SEL_VI0_1), 838 PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7_B, SEL_VI0_1),
840 PINMUX_IPSR_DATA(IP0_15_12, D4), 839 PINMUX_IPSR_DATA(IP0_15_12, D4),
841 PINMUX_IPSR_MODSEL_DATA(IP0_15_12, SCIFB1_RXD_F, SEL_SCIFB1_5), 840 PINMUX_IPSR_MSEL(IP0_15_12, SCIFB1_RXD_F, SEL_SCIFB1_5),
842 PINMUX_IPSR_MODSEL_DATA(IP0_15_12, SCIFB0_RXD_C, SEL_SCIFB_2), 841 PINMUX_IPSR_MSEL(IP0_15_12, SCIFB0_RXD_C, SEL_SCIFB_2),
843 PINMUX_IPSR_MODSEL_DATA(IP0_15_12, VI3_DATA4, SEL_VI3_0), 842 PINMUX_IPSR_MSEL(IP0_15_12, VI3_DATA4, SEL_VI3_0),
844 PINMUX_IPSR_MODSEL_DATA(IP0_15_12, VI0_R0, SEL_VI0_0), 843 PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0, SEL_VI0_0),
845 PINMUX_IPSR_MODSEL_DATA(IP0_15_12, VI0_R0_B, SEL_VI0_1), 844 PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0_B, SEL_VI0_1),
846 PINMUX_IPSR_MODSEL_DATA(IP0_15_12, RX0_B, SEL_SCIF0_1), 845 PINMUX_IPSR_MSEL(IP0_15_12, RX0_B, SEL_SCIF0_1),
847 PINMUX_IPSR_DATA(IP0_19_16, D5), 846 PINMUX_IPSR_DATA(IP0_19_16, D5),
848 PINMUX_IPSR_MODSEL_DATA(IP0_19_16, SCIFB1_TXD_F, SEL_SCIFB1_5), 847 PINMUX_IPSR_MSEL(IP0_19_16, SCIFB1_TXD_F, SEL_SCIFB1_5),
849 PINMUX_IPSR_MODSEL_DATA(IP0_19_16, SCIFB0_TXD_C, SEL_SCIFB_2), 848 PINMUX_IPSR_MSEL(IP0_19_16, SCIFB0_TXD_C, SEL_SCIFB_2),
850 PINMUX_IPSR_MODSEL_DATA(IP0_19_16, VI3_DATA5, SEL_VI3_0), 849 PINMUX_IPSR_MSEL(IP0_19_16, VI3_DATA5, SEL_VI3_0),
851 PINMUX_IPSR_MODSEL_DATA(IP0_19_16, VI0_R1, SEL_VI0_0), 850 PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1, SEL_VI0_0),
852 PINMUX_IPSR_MODSEL_DATA(IP0_19_16, VI0_R1_B, SEL_VI0_1), 851 PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1_B, SEL_VI0_1),
853 PINMUX_IPSR_MODSEL_DATA(IP0_19_16, TX0_B, SEL_SCIF0_1), 852 PINMUX_IPSR_MSEL(IP0_19_16, TX0_B, SEL_SCIF0_1),
854 PINMUX_IPSR_DATA(IP0_22_20, D6), 853 PINMUX_IPSR_DATA(IP0_22_20, D6),
855 PINMUX_IPSR_MODSEL_DATA(IP0_22_20, IIC2_SCL_C, SEL_IIC2_2), 854 PINMUX_IPSR_MSEL(IP0_22_20, IIC2_SCL_C, SEL_IIC2_2),
856 PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI3_DATA6, SEL_VI3_0), 855 PINMUX_IPSR_MSEL(IP0_22_20, VI3_DATA6, SEL_VI3_0),
857 PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI0_R2, SEL_VI0_0), 856 PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2, SEL_VI0_0),
858 PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI0_R2_B, SEL_VI0_1), 857 PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2_B, SEL_VI0_1),
859 PINMUX_IPSR_MODSEL_DATA(IP0_22_20, I2C2_SCL_C, SEL_I2C2_2), 858 PINMUX_IPSR_MSEL(IP0_22_20, I2C2_SCL_C, SEL_I2C2_2),
860 PINMUX_IPSR_DATA(IP0_26_23, D7), 859 PINMUX_IPSR_DATA(IP0_26_23, D7),
861 PINMUX_IPSR_MODSEL_DATA(IP0_26_23, AD_DI_B, SEL_ADI_1), 860 PINMUX_IPSR_MSEL(IP0_26_23, AD_DI_B, SEL_ADI_1),
862 PINMUX_IPSR_MODSEL_DATA(IP0_26_23, IIC2_SDA_C, SEL_IIC2_2), 861 PINMUX_IPSR_MSEL(IP0_26_23, IIC2_SDA_C, SEL_IIC2_2),
863 PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI3_DATA7, SEL_VI3_0), 862 PINMUX_IPSR_MSEL(IP0_26_23, VI3_DATA7, SEL_VI3_0),
864 PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI0_R3, SEL_VI0_0), 863 PINMUX_IPSR_MSEL(IP0_26_23, VI0_R3, SEL_VI0_0),
865 PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI0_R3_B, SEL_VI0_1), 864 PINMUX_IPSR_MSEL(IP0_26_23, VI0_R3_B, SEL_VI0_1),
866 PINMUX_IPSR_MODSEL_DATA(IP0_26_23, I2C2_SDA_C, SEL_I2C2_2), 865 PINMUX_IPSR_MSEL(IP0_26_23, I2C2_SDA_C, SEL_I2C2_2),
867 PINMUX_IPSR_MODSEL_DATA(IP0_26_23, TCLK1, SEL_TMU1_0), 866 PINMUX_IPSR_MSEL(IP0_26_23, TCLK1, SEL_TMU1_0),
868 PINMUX_IPSR_DATA(IP0_30_27, D8), 867 PINMUX_IPSR_DATA(IP0_30_27, D8),
869 PINMUX_IPSR_MODSEL_DATA(IP0_30_27, SCIFA1_SCK_C, SEL_SCIFA1_2), 868 PINMUX_IPSR_MSEL(IP0_30_27, SCIFA1_SCK_C, SEL_SCIFA1_2),
870 PINMUX_IPSR_DATA(IP0_30_27, AVB_TXD0), 869 PINMUX_IPSR_DATA(IP0_30_27, AVB_TXD0),
871 PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI0_G0, SEL_VI0_0), 870 PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0, SEL_VI0_0),
872 PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI0_G0_B, SEL_VI0_1), 871 PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0_B, SEL_VI0_1),
873 PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI2_DATA0_VI2_B0, SEL_VI2_0), 872 PINMUX_IPSR_MSEL(IP0_30_27, VI2_DATA0_VI2_B0, SEL_VI2_0),
874 873
875 PINMUX_IPSR_DATA(IP1_3_0, D9), 874 PINMUX_IPSR_DATA(IP1_3_0, D9),
876 PINMUX_IPSR_MODSEL_DATA(IP1_3_0, SCIFA1_RXD_C, SEL_SCIFA1_2), 875 PINMUX_IPSR_MSEL(IP1_3_0, SCIFA1_RXD_C, SEL_SCIFA1_2),
877 PINMUX_IPSR_DATA(IP1_3_0, AVB_TXD1), 876 PINMUX_IPSR_DATA(IP1_3_0, AVB_TXD1),
878 PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI0_G1, SEL_VI0_0), 877 PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1, SEL_VI0_0),
879 PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI0_G1_B, SEL_VI0_1), 878 PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1_B, SEL_VI0_1),
880 PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI2_DATA1_VI2_B1, SEL_VI2_0), 879 PINMUX_IPSR_MSEL(IP1_3_0, VI2_DATA1_VI2_B1, SEL_VI2_0),
881 PINMUX_IPSR_DATA(IP1_7_4, D10), 880 PINMUX_IPSR_DATA(IP1_7_4, D10),
882 PINMUX_IPSR_MODSEL_DATA(IP1_7_4, SCIFA1_TXD_C, SEL_SCIFA1_2), 881 PINMUX_IPSR_MSEL(IP1_7_4, SCIFA1_TXD_C, SEL_SCIFA1_2),
883 PINMUX_IPSR_DATA(IP1_7_4, AVB_TXD2), 882 PINMUX_IPSR_DATA(IP1_7_4, AVB_TXD2),
884 PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI0_G2, SEL_VI0_0), 883 PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2, SEL_VI0_0),
885 PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI0_G2_B, SEL_VI0_1), 884 PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2_B, SEL_VI0_1),
886 PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI2_DATA2_VI2_B2, SEL_VI2_0), 885 PINMUX_IPSR_MSEL(IP1_7_4, VI2_DATA2_VI2_B2, SEL_VI2_0),
887 PINMUX_IPSR_DATA(IP1_11_8, D11), 886 PINMUX_IPSR_DATA(IP1_11_8, D11),
888 PINMUX_IPSR_MODSEL_DATA(IP1_11_8, SCIFA1_CTS_N_C, SEL_SCIFA1_2), 887 PINMUX_IPSR_MSEL(IP1_11_8, SCIFA1_CTS_N_C, SEL_SCIFA1_2),
889 PINMUX_IPSR_DATA(IP1_11_8, AVB_TXD3), 888 PINMUX_IPSR_DATA(IP1_11_8, AVB_TXD3),
890 PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI0_G3, SEL_VI0_0), 889 PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3, SEL_VI0_0),
891 PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI0_G3_B, SEL_VI0_1), 890 PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3_B, SEL_VI0_1),
892 PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI2_DATA3_VI2_B3, SEL_VI2_0), 891 PINMUX_IPSR_MSEL(IP1_11_8, VI2_DATA3_VI2_B3, SEL_VI2_0),
893 PINMUX_IPSR_DATA(IP1_14_12, D12), 892 PINMUX_IPSR_DATA(IP1_14_12, D12),
894 PINMUX_IPSR_MODSEL_DATA(IP1_14_12, SCIFA1_RTS_N_C, SEL_SCIFA1_2), 893 PINMUX_IPSR_MSEL(IP1_14_12, SCIFA1_RTS_N_C, SEL_SCIFA1_2),
895 PINMUX_IPSR_DATA(IP1_14_12, AVB_TXD4), 894 PINMUX_IPSR_DATA(IP1_14_12, AVB_TXD4),
896 PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI0_HSYNC_N, SEL_VI0_0), 895 PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N, SEL_VI0_0),
897 PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI0_HSYNC_N_B, SEL_VI0_1), 896 PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N_B, SEL_VI0_1),
898 PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI2_DATA4_VI2_B4, SEL_VI2_0), 897 PINMUX_IPSR_MSEL(IP1_14_12, VI2_DATA4_VI2_B4, SEL_VI2_0),
899 PINMUX_IPSR_DATA(IP1_17_15, D13), 898 PINMUX_IPSR_DATA(IP1_17_15, D13),
900 PINMUX_IPSR_DATA(IP1_17_15, AVB_TXD5), 899 PINMUX_IPSR_DATA(IP1_17_15, AVB_TXD5),
901 PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI0_VSYNC_N, SEL_VI0_0), 900 PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N, SEL_VI0_0),
902 PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI0_VSYNC_N_B, SEL_VI0_1), 901 PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N_B, SEL_VI0_1),
903 PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI2_DATA5_VI2_B5, SEL_VI2_0), 902 PINMUX_IPSR_MSEL(IP1_17_15, VI2_DATA5_VI2_B5, SEL_VI2_0),
904 PINMUX_IPSR_DATA(IP1_21_18, D14), 903 PINMUX_IPSR_DATA(IP1_21_18, D14),
905 PINMUX_IPSR_MODSEL_DATA(IP1_21_18, SCIFB1_RXD_C, SEL_SCIFB1_2), 904 PINMUX_IPSR_MSEL(IP1_21_18, SCIFB1_RXD_C, SEL_SCIFB1_2),
906 PINMUX_IPSR_DATA(IP1_21_18, AVB_TXD6), 905 PINMUX_IPSR_DATA(IP1_21_18, AVB_TXD6),
907 PINMUX_IPSR_MODSEL_DATA(IP1_21_18, RX1_B, SEL_SCIF1_1), 906 PINMUX_IPSR_MSEL(IP1_21_18, RX1_B, SEL_SCIF1_1),
908 PINMUX_IPSR_MODSEL_DATA(IP1_21_18, VI0_CLKENB, SEL_VI0_0), 907 PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB, SEL_VI0_0),
909 PINMUX_IPSR_MODSEL_DATA(IP1_21_18, VI0_CLKENB_B, SEL_VI0_1), 908 PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB_B, SEL_VI0_1),
910 PINMUX_IPSR_MODSEL_DATA(IP1_21_18, VI2_DATA6_VI2_B6, SEL_VI2_0), 909 PINMUX_IPSR_MSEL(IP1_21_18, VI2_DATA6_VI2_B6, SEL_VI2_0),
911 PINMUX_IPSR_DATA(IP1_25_22, D15), 910 PINMUX_IPSR_DATA(IP1_25_22, D15),
912 PINMUX_IPSR_MODSEL_DATA(IP1_25_22, SCIFB1_TXD_C, SEL_SCIFB1_2), 911 PINMUX_IPSR_MSEL(IP1_25_22, SCIFB1_TXD_C, SEL_SCIFB1_2),
913 PINMUX_IPSR_DATA(IP1_25_22, AVB_TXD7), 912 PINMUX_IPSR_DATA(IP1_25_22, AVB_TXD7),
914 PINMUX_IPSR_MODSEL_DATA(IP1_25_22, TX1_B, SEL_SCIF1_1), 913 PINMUX_IPSR_MSEL(IP1_25_22, TX1_B, SEL_SCIF1_1),
915 PINMUX_IPSR_MODSEL_DATA(IP1_25_22, VI0_FIELD, SEL_VI0_0), 914 PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD, SEL_VI0_0),
916 PINMUX_IPSR_MODSEL_DATA(IP1_25_22, VI0_FIELD_B, SEL_VI0_1), 915 PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD_B, SEL_VI0_1),
917 PINMUX_IPSR_MODSEL_DATA(IP1_25_22, VI2_DATA7_VI2_B7, SEL_VI2_0), 916 PINMUX_IPSR_MSEL(IP1_25_22, VI2_DATA7_VI2_B7, SEL_VI2_0),
918 PINMUX_IPSR_DATA(IP1_27_26, A0), 917 PINMUX_IPSR_DATA(IP1_27_26, A0),
919 PINMUX_IPSR_DATA(IP1_27_26, PWM3), 918 PINMUX_IPSR_DATA(IP1_27_26, PWM3),
920 PINMUX_IPSR_DATA(IP1_29_28, A1), 919 PINMUX_IPSR_DATA(IP1_29_28, A1),
@@ -922,512 +921,512 @@ static const u16 pinmux_data[] = {
922 921
923 PINMUX_IPSR_DATA(IP2_2_0, A2), 922 PINMUX_IPSR_DATA(IP2_2_0, A2),
924 PINMUX_IPSR_DATA(IP2_2_0, PWM5), 923 PINMUX_IPSR_DATA(IP2_2_0, PWM5),
925 PINMUX_IPSR_MODSEL_DATA(IP2_2_0, MSIOF1_SS1_B, SEL_SOF1_1), 924 PINMUX_IPSR_MSEL(IP2_2_0, MSIOF1_SS1_B, SEL_SOF1_1),
926 PINMUX_IPSR_DATA(IP2_5_3, A3), 925 PINMUX_IPSR_DATA(IP2_5_3, A3),
927 PINMUX_IPSR_DATA(IP2_5_3, PWM6), 926 PINMUX_IPSR_DATA(IP2_5_3, PWM6),
928 PINMUX_IPSR_MODSEL_DATA(IP2_5_3, MSIOF1_SS2_B, SEL_SOF1_1), 927 PINMUX_IPSR_MSEL(IP2_5_3, MSIOF1_SS2_B, SEL_SOF1_1),
929 PINMUX_IPSR_DATA(IP2_8_6, A4), 928 PINMUX_IPSR_DATA(IP2_8_6, A4),
930 PINMUX_IPSR_MODSEL_DATA(IP2_8_6, MSIOF1_TXD_B, SEL_SOF1_1), 929 PINMUX_IPSR_MSEL(IP2_8_6, MSIOF1_TXD_B, SEL_SOF1_1),
931 PINMUX_IPSR_DATA(IP2_8_6, TPU0TO0), 930 PINMUX_IPSR_DATA(IP2_8_6, TPU0TO0),
932 PINMUX_IPSR_DATA(IP2_11_9, A5), 931 PINMUX_IPSR_DATA(IP2_11_9, A5),
933 PINMUX_IPSR_MODSEL_DATA(IP2_11_9, SCIFA1_TXD_B, SEL_SCIFA1_1), 932 PINMUX_IPSR_MSEL(IP2_11_9, SCIFA1_TXD_B, SEL_SCIFA1_1),
934 PINMUX_IPSR_DATA(IP2_11_9, TPU0TO1), 933 PINMUX_IPSR_DATA(IP2_11_9, TPU0TO1),
935 PINMUX_IPSR_DATA(IP2_14_12, A6), 934 PINMUX_IPSR_DATA(IP2_14_12, A6),
936 PINMUX_IPSR_MODSEL_DATA(IP2_14_12, SCIFA1_RTS_N_B, SEL_SCIFA1_1), 935 PINMUX_IPSR_MSEL(IP2_14_12, SCIFA1_RTS_N_B, SEL_SCIFA1_1),
937 PINMUX_IPSR_DATA(IP2_14_12, TPU0TO2), 936 PINMUX_IPSR_DATA(IP2_14_12, TPU0TO2),
938 PINMUX_IPSR_DATA(IP2_17_15, A7), 937 PINMUX_IPSR_DATA(IP2_17_15, A7),
939 PINMUX_IPSR_MODSEL_DATA(IP2_17_15, SCIFA1_SCK_B, SEL_SCIFA1_1), 938 PINMUX_IPSR_MSEL(IP2_17_15, SCIFA1_SCK_B, SEL_SCIFA1_1),
940 PINMUX_IPSR_DATA(IP2_17_15, AUDIO_CLKOUT_B), 939 PINMUX_IPSR_DATA(IP2_17_15, AUDIO_CLKOUT_B),
941 PINMUX_IPSR_DATA(IP2_17_15, TPU0TO3), 940 PINMUX_IPSR_DATA(IP2_17_15, TPU0TO3),
942 PINMUX_IPSR_DATA(IP2_21_18, A8), 941 PINMUX_IPSR_DATA(IP2_21_18, A8),
943 PINMUX_IPSR_MODSEL_DATA(IP2_21_18, SCIFA1_RXD_B, SEL_SCIFA1_1), 942 PINMUX_IPSR_MSEL(IP2_21_18, SCIFA1_RXD_B, SEL_SCIFA1_1),
944 PINMUX_IPSR_MODSEL_DATA(IP2_21_18, SSI_SCK5_B, SEL_SSI5_1), 943 PINMUX_IPSR_MSEL(IP2_21_18, SSI_SCK5_B, SEL_SSI5_1),
945 PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI0_R4, SEL_VI0_0), 944 PINMUX_IPSR_MSEL(IP2_21_18, VI0_R4, SEL_VI0_0),
946 PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI0_R4_B, SEL_VI0_1), 945 PINMUX_IPSR_MSEL(IP2_21_18, VI0_R4_B, SEL_VI0_1),
947 PINMUX_IPSR_MODSEL_DATA(IP2_21_18, SCIFB2_RXD_C, SEL_SCIFB2_2), 946 PINMUX_IPSR_MSEL(IP2_21_18, SCIFB2_RXD_C, SEL_SCIFB2_2),
948 PINMUX_IPSR_MODSEL_DATA(IP2_21_18, RX2_B, SEL_SCIF2_1), 947 PINMUX_IPSR_MSEL(IP2_21_18, RX2_B, SEL_SCIF2_1),
949 PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI2_DATA0_VI2_B0_B, SEL_VI2_1), 948 PINMUX_IPSR_MSEL(IP2_21_18, VI2_DATA0_VI2_B0_B, SEL_VI2_1),
950 PINMUX_IPSR_DATA(IP2_25_22, A9), 949 PINMUX_IPSR_DATA(IP2_25_22, A9),
951 PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SCIFA1_CTS_N_B, SEL_SCIFA1_1), 950 PINMUX_IPSR_MSEL(IP2_25_22, SCIFA1_CTS_N_B, SEL_SCIFA1_1),
952 PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SSI_WS5_B, SEL_SSI5_1), 951 PINMUX_IPSR_MSEL(IP2_25_22, SSI_WS5_B, SEL_SSI5_1),
953 PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI0_R5, SEL_VI0_0), 952 PINMUX_IPSR_MSEL(IP2_25_22, VI0_R5, SEL_VI0_0),
954 PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI0_R5_B, SEL_VI0_1), 953 PINMUX_IPSR_MSEL(IP2_25_22, VI0_R5_B, SEL_VI0_1),
955 PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SCIFB2_TXD_C, SEL_SCIFB2_2), 954 PINMUX_IPSR_MSEL(IP2_25_22, SCIFB2_TXD_C, SEL_SCIFB2_2),
956 PINMUX_IPSR_MODSEL_DATA(IP2_25_22, TX2_B, SEL_SCIF2_1), 955 PINMUX_IPSR_MSEL(IP2_25_22, TX2_B, SEL_SCIF2_1),
957 PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI2_DATA1_VI2_B1_B, SEL_VI2_1), 956 PINMUX_IPSR_MSEL(IP2_25_22, VI2_DATA1_VI2_B1_B, SEL_VI2_1),
958 PINMUX_IPSR_DATA(IP2_28_26, A10), 957 PINMUX_IPSR_DATA(IP2_28_26, A10),
959 PINMUX_IPSR_MODSEL_DATA(IP2_28_26, SSI_SDATA5_B, SEL_SSI5_1), 958 PINMUX_IPSR_MSEL(IP2_28_26, SSI_SDATA5_B, SEL_SSI5_1),
960 PINMUX_IPSR_DATA(IP2_28_26, MSIOF2_SYNC), 959 PINMUX_IPSR_DATA(IP2_28_26, MSIOF2_SYNC),
961 PINMUX_IPSR_MODSEL_DATA(IP2_28_26, VI0_R6, SEL_VI0_0), 960 PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6, SEL_VI0_0),
962 PINMUX_IPSR_MODSEL_DATA(IP2_28_26, VI0_R6_B, SEL_VI0_1), 961 PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6_B, SEL_VI0_1),
963 PINMUX_IPSR_MODSEL_DATA(IP2_28_26, VI2_DATA2_VI2_B2_B, SEL_VI2_1), 962 PINMUX_IPSR_MSEL(IP2_28_26, VI2_DATA2_VI2_B2_B, SEL_VI2_1),
964 963
965 PINMUX_IPSR_DATA(IP3_3_0, A11), 964 PINMUX_IPSR_DATA(IP3_3_0, A11),
966 PINMUX_IPSR_MODSEL_DATA(IP3_3_0, SCIFB2_CTS_N_B, SEL_SCIFB2_1), 965 PINMUX_IPSR_MSEL(IP3_3_0, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
967 PINMUX_IPSR_DATA(IP3_3_0, MSIOF2_SCK), 966 PINMUX_IPSR_DATA(IP3_3_0, MSIOF2_SCK),
968 PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI1_R0, SEL_VI1_0), 967 PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0, SEL_VI1_0),
969 PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI1_R0_B, SEL_VI1_1), 968 PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0_B, SEL_VI1_1),
970 PINMUX_IPSR_DATA(IP3_3_0, VI2_G0), 969 PINMUX_IPSR_DATA(IP3_3_0, VI2_G0),
971 PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI2_DATA3_VI2_B3_B, SEL_VI2_1), 970 PINMUX_IPSR_MSEL(IP3_3_0, VI2_DATA3_VI2_B3_B, SEL_VI2_1),
972 PINMUX_IPSR_DATA(IP3_7_4, A12), 971 PINMUX_IPSR_DATA(IP3_7_4, A12),
973 PINMUX_IPSR_MODSEL_DATA(IP3_7_4, SCIFB2_RXD_B, SEL_SCIFB2_1), 972 PINMUX_IPSR_MSEL(IP3_7_4, SCIFB2_RXD_B, SEL_SCIFB2_1),
974 PINMUX_IPSR_DATA(IP3_7_4, MSIOF2_TXD), 973 PINMUX_IPSR_DATA(IP3_7_4, MSIOF2_TXD),
975 PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI1_R1, SEL_VI1_0), 974 PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1, SEL_VI1_0),
976 PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI1_R1_B, SEL_VI1_1), 975 PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1_B, SEL_VI1_1),
977 PINMUX_IPSR_DATA(IP3_7_4, VI2_G1), 976 PINMUX_IPSR_DATA(IP3_7_4, VI2_G1),
978 PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI2_DATA4_VI2_B4_B, SEL_VI2_1), 977 PINMUX_IPSR_MSEL(IP3_7_4, VI2_DATA4_VI2_B4_B, SEL_VI2_1),
979 PINMUX_IPSR_DATA(IP3_11_8, A13), 978 PINMUX_IPSR_DATA(IP3_11_8, A13),
980 PINMUX_IPSR_MODSEL_DATA(IP3_11_8, SCIFB2_RTS_N_B, SEL_SCIFB2_1), 979 PINMUX_IPSR_MSEL(IP3_11_8, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
981 PINMUX_IPSR_DATA(IP3_11_8, EX_WAIT2), 980 PINMUX_IPSR_DATA(IP3_11_8, EX_WAIT2),
982 PINMUX_IPSR_DATA(IP3_11_8, MSIOF2_RXD), 981 PINMUX_IPSR_DATA(IP3_11_8, MSIOF2_RXD),
983 PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI1_R2, SEL_VI1_0), 982 PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2, SEL_VI1_0),
984 PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI1_R2_B, SEL_VI1_1), 983 PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2_B, SEL_VI1_1),
985 PINMUX_IPSR_DATA(IP3_11_8, VI2_G2), 984 PINMUX_IPSR_DATA(IP3_11_8, VI2_G2),
986 PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_1), 985 PINMUX_IPSR_MSEL(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_1),
987 PINMUX_IPSR_DATA(IP3_14_12, A14), 986 PINMUX_IPSR_DATA(IP3_14_12, A14),
988 PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SCIFB2_TXD_B, SEL_SCIFB2_1), 987 PINMUX_IPSR_MSEL(IP3_14_12, SCIFB2_TXD_B, SEL_SCIFB2_1),
989 PINMUX_IPSR_DATA(IP3_14_12, ATACS11_N), 988 PINMUX_IPSR_DATA(IP3_14_12, ATACS11_N),
990 PINMUX_IPSR_DATA(IP3_14_12, MSIOF2_SS1), 989 PINMUX_IPSR_DATA(IP3_14_12, MSIOF2_SS1),
991 PINMUX_IPSR_DATA(IP3_17_15, A15), 990 PINMUX_IPSR_DATA(IP3_17_15, A15),
992 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SCIFB2_SCK_B, SEL_SCIFB2_1), 991 PINMUX_IPSR_MSEL(IP3_17_15, SCIFB2_SCK_B, SEL_SCIFB2_1),
993 PINMUX_IPSR_DATA(IP3_17_15, ATARD1_N), 992 PINMUX_IPSR_DATA(IP3_17_15, ATARD1_N),
994 PINMUX_IPSR_DATA(IP3_17_15, MSIOF2_SS2), 993 PINMUX_IPSR_DATA(IP3_17_15, MSIOF2_SS2),
995 PINMUX_IPSR_DATA(IP3_19_18, A16), 994 PINMUX_IPSR_DATA(IP3_19_18, A16),
996 PINMUX_IPSR_DATA(IP3_19_18, ATAWR1_N), 995 PINMUX_IPSR_DATA(IP3_19_18, ATAWR1_N),
997 PINMUX_IPSR_DATA(IP3_22_20, A17), 996 PINMUX_IPSR_DATA(IP3_22_20, A17),
998 PINMUX_IPSR_MODSEL_DATA(IP3_22_20, AD_DO_B, SEL_ADI_1), 997 PINMUX_IPSR_MSEL(IP3_22_20, AD_DO_B, SEL_ADI_1),
999 PINMUX_IPSR_DATA(IP3_22_20, ATADIR1_N), 998 PINMUX_IPSR_DATA(IP3_22_20, ATADIR1_N),
1000 PINMUX_IPSR_DATA(IP3_25_23, A18), 999 PINMUX_IPSR_DATA(IP3_25_23, A18),
1001 PINMUX_IPSR_MODSEL_DATA(IP3_25_23, AD_CLK_B, SEL_ADI_1), 1000 PINMUX_IPSR_MSEL(IP3_25_23, AD_CLK_B, SEL_ADI_1),
1002 PINMUX_IPSR_DATA(IP3_25_23, ATAG1_N), 1001 PINMUX_IPSR_DATA(IP3_25_23, ATAG1_N),
1003 PINMUX_IPSR_DATA(IP3_28_26, A19), 1002 PINMUX_IPSR_DATA(IP3_28_26, A19),
1004 PINMUX_IPSR_MODSEL_DATA(IP3_28_26, AD_NCS_N_B, SEL_ADI_1), 1003 PINMUX_IPSR_MSEL(IP3_28_26, AD_NCS_N_B, SEL_ADI_1),
1005 PINMUX_IPSR_DATA(IP3_28_26, ATACS01_N), 1004 PINMUX_IPSR_DATA(IP3_28_26, ATACS01_N),
1006 PINMUX_IPSR_MODSEL_DATA(IP3_28_26, EX_WAIT0_B, SEL_LBS_1), 1005 PINMUX_IPSR_MSEL(IP3_28_26, EX_WAIT0_B, SEL_LBS_1),
1007 PINMUX_IPSR_DATA(IP3_31_29, A20), 1006 PINMUX_IPSR_DATA(IP3_31_29, A20),
1008 PINMUX_IPSR_DATA(IP3_31_29, SPCLK), 1007 PINMUX_IPSR_DATA(IP3_31_29, SPCLK),
1009 PINMUX_IPSR_MODSEL_DATA(IP3_31_29, VI1_R3, SEL_VI1_0), 1008 PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3, SEL_VI1_0),
1010 PINMUX_IPSR_MODSEL_DATA(IP3_31_29, VI1_R3_B, SEL_VI1_1), 1009 PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3_B, SEL_VI1_1),
1011 PINMUX_IPSR_DATA(IP3_31_29, VI2_G4), 1010 PINMUX_IPSR_DATA(IP3_31_29, VI2_G4),
1012 1011
1013 PINMUX_IPSR_DATA(IP4_2_0, A21), 1012 PINMUX_IPSR_DATA(IP4_2_0, A21),
1014 PINMUX_IPSR_DATA(IP4_2_0, MOSI_IO0), 1013 PINMUX_IPSR_DATA(IP4_2_0, MOSI_IO0),
1015 PINMUX_IPSR_MODSEL_DATA(IP4_2_0, VI1_R4, SEL_VI1_0), 1014 PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4, SEL_VI1_0),
1016 PINMUX_IPSR_MODSEL_DATA(IP4_2_0, VI1_R4_B, SEL_VI1_1), 1015 PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4_B, SEL_VI1_1),
1017 PINMUX_IPSR_DATA(IP4_2_0, VI2_G5), 1016 PINMUX_IPSR_DATA(IP4_2_0, VI2_G5),
1018 PINMUX_IPSR_DATA(IP4_5_3, A22), 1017 PINMUX_IPSR_DATA(IP4_5_3, A22),
1019 PINMUX_IPSR_DATA(IP4_5_3, MISO_IO1), 1018 PINMUX_IPSR_DATA(IP4_5_3, MISO_IO1),
1020 PINMUX_IPSR_MODSEL_DATA(IP4_5_3, VI1_R5, SEL_VI1_0), 1019 PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5, SEL_VI1_0),
1021 PINMUX_IPSR_MODSEL_DATA(IP4_5_3, VI1_R5_B, SEL_VI1_1), 1020 PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5_B, SEL_VI1_1),
1022 PINMUX_IPSR_DATA(IP4_5_3, VI2_G6), 1021 PINMUX_IPSR_DATA(IP4_5_3, VI2_G6),
1023 PINMUX_IPSR_DATA(IP4_8_6, A23), 1022 PINMUX_IPSR_DATA(IP4_8_6, A23),
1024 PINMUX_IPSR_DATA(IP4_8_6, IO2), 1023 PINMUX_IPSR_DATA(IP4_8_6, IO2),
1025 PINMUX_IPSR_MODSEL_DATA(IP4_8_6, VI1_G7, SEL_VI1_0), 1024 PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7, SEL_VI1_0),
1026 PINMUX_IPSR_MODSEL_DATA(IP4_8_6, VI1_G7_B, SEL_VI1_1), 1025 PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7_B, SEL_VI1_1),
1027 PINMUX_IPSR_DATA(IP4_8_6, VI2_G7), 1026 PINMUX_IPSR_DATA(IP4_8_6, VI2_G7),
1028 PINMUX_IPSR_DATA(IP4_11_9, A24), 1027 PINMUX_IPSR_DATA(IP4_11_9, A24),
1029 PINMUX_IPSR_DATA(IP4_11_9, IO3), 1028 PINMUX_IPSR_DATA(IP4_11_9, IO3),
1030 PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI1_R7, SEL_VI1_0), 1029 PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7, SEL_VI1_0),
1031 PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI1_R7_B, SEL_VI1_1), 1030 PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7_B, SEL_VI1_1),
1032 PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI2_CLKENB, SEL_VI2_0), 1031 PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB, SEL_VI2_0),
1033 PINMUX_IPSR_MODSEL_DATA(IP4_11_9, VI2_CLKENB_B, SEL_VI2_1), 1032 PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB_B, SEL_VI2_1),
1034 PINMUX_IPSR_DATA(IP4_14_12, A25), 1033 PINMUX_IPSR_DATA(IP4_14_12, A25),
1035 PINMUX_IPSR_DATA(IP4_14_12, SSL), 1034 PINMUX_IPSR_DATA(IP4_14_12, SSL),
1036 PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI1_G6, SEL_VI1_0), 1035 PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6, SEL_VI1_0),
1037 PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI1_G6_B, SEL_VI1_1), 1036 PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6_B, SEL_VI1_1),
1038 PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI2_FIELD, SEL_VI2_0), 1037 PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD, SEL_VI2_0),
1039 PINMUX_IPSR_MODSEL_DATA(IP4_14_12, VI2_FIELD_B, SEL_VI2_1), 1038 PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD_B, SEL_VI2_1),
1040 PINMUX_IPSR_DATA(IP4_17_15, CS0_N), 1039 PINMUX_IPSR_DATA(IP4_17_15, CS0_N),
1041 PINMUX_IPSR_MODSEL_DATA(IP4_17_15, VI1_R6, SEL_VI1_0), 1040 PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6, SEL_VI1_0),
1042 PINMUX_IPSR_MODSEL_DATA(IP4_17_15, VI1_R6_B, SEL_VI1_1), 1041 PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6_B, SEL_VI1_1),
1043 PINMUX_IPSR_DATA(IP4_17_15, VI2_G3), 1042 PINMUX_IPSR_DATA(IP4_17_15, VI2_G3),
1044 PINMUX_IPSR_MODSEL_DATA(IP4_17_15, MSIOF0_SS2_B, SEL_SOF0_1), 1043 PINMUX_IPSR_MSEL(IP4_17_15, MSIOF0_SS2_B, SEL_SOF0_1),
1045 PINMUX_IPSR_DATA(IP4_20_18, CS1_N_A26), 1044 PINMUX_IPSR_DATA(IP4_20_18, CS1_N_A26),
1046 PINMUX_IPSR_DATA(IP4_20_18, SPEEDIN), 1045 PINMUX_IPSR_DATA(IP4_20_18, SPEEDIN),
1047 PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI0_R7, SEL_VI0_0), 1046 PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7, SEL_VI0_0),
1048 PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI0_R7_B, SEL_VI0_1), 1047 PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7_B, SEL_VI0_1),
1049 PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI2_CLK, SEL_VI2_0), 1048 PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK, SEL_VI2_0),
1050 PINMUX_IPSR_MODSEL_DATA(IP4_20_18, VI2_CLK_B, SEL_VI2_1), 1049 PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK_B, SEL_VI2_1),
1051 PINMUX_IPSR_DATA(IP4_23_21, EX_CS0_N), 1050 PINMUX_IPSR_DATA(IP4_23_21, EX_CS0_N),
1052 PINMUX_IPSR_MODSEL_DATA(IP4_23_21, HRX1_B, SEL_HSCIF1_1), 1051 PINMUX_IPSR_MSEL(IP4_23_21, HRX1_B, SEL_HSCIF1_1),
1053 PINMUX_IPSR_MODSEL_DATA(IP4_23_21, VI1_G5, SEL_VI1_0), 1052 PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5, SEL_VI1_0),
1054 PINMUX_IPSR_MODSEL_DATA(IP4_23_21, VI1_G5_B, SEL_VI1_1), 1053 PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5_B, SEL_VI1_1),
1055 PINMUX_IPSR_DATA(IP4_23_21, VI2_R0), 1054 PINMUX_IPSR_DATA(IP4_23_21, VI2_R0),
1056 PINMUX_IPSR_MODSEL_DATA(IP4_23_21, HTX0_B, SEL_HSCIF0_1), 1055 PINMUX_IPSR_MSEL(IP4_23_21, HTX0_B, SEL_HSCIF0_1),
1057 PINMUX_IPSR_MODSEL_DATA(IP4_23_21, MSIOF0_SS1_B, SEL_SOF0_1), 1056 PINMUX_IPSR_MSEL(IP4_23_21, MSIOF0_SS1_B, SEL_SOF0_1),
1058 PINMUX_IPSR_DATA(IP4_26_24, EX_CS1_N), 1057 PINMUX_IPSR_DATA(IP4_26_24, EX_CS1_N),
1059 PINMUX_IPSR_DATA(IP4_26_24, GPS_CLK), 1058 PINMUX_IPSR_DATA(IP4_26_24, GPS_CLK),
1060 PINMUX_IPSR_MODSEL_DATA(IP4_26_24, HCTS1_N_B, SEL_HSCIF1_1), 1059 PINMUX_IPSR_MSEL(IP4_26_24, HCTS1_N_B, SEL_HSCIF1_1),
1061 PINMUX_IPSR_MODSEL_DATA(IP4_26_24, VI1_FIELD, SEL_VI1_0), 1060 PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD, SEL_VI1_0),
1062 PINMUX_IPSR_MODSEL_DATA(IP4_26_24, VI1_FIELD_B, SEL_VI1_1), 1061 PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD_B, SEL_VI1_1),
1063 PINMUX_IPSR_DATA(IP4_26_24, VI2_R1), 1062 PINMUX_IPSR_DATA(IP4_26_24, VI2_R1),
1064 PINMUX_IPSR_DATA(IP4_29_27, EX_CS2_N), 1063 PINMUX_IPSR_DATA(IP4_29_27, EX_CS2_N),
1065 PINMUX_IPSR_DATA(IP4_29_27, GPS_SIGN), 1064 PINMUX_IPSR_DATA(IP4_29_27, GPS_SIGN),
1066 PINMUX_IPSR_MODSEL_DATA(IP4_29_27, HRTS1_N_B, SEL_HSCIF1_1), 1065 PINMUX_IPSR_MSEL(IP4_29_27, HRTS1_N_B, SEL_HSCIF1_1),
1067 PINMUX_IPSR_DATA(IP4_29_27, VI3_CLKENB), 1066 PINMUX_IPSR_DATA(IP4_29_27, VI3_CLKENB),
1068 PINMUX_IPSR_MODSEL_DATA(IP4_29_27, VI1_G0, SEL_VI1_0), 1067 PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0, SEL_VI1_0),
1069 PINMUX_IPSR_MODSEL_DATA(IP4_29_27, VI1_G0_B, SEL_VI1_1), 1068 PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0_B, SEL_VI1_1),
1070 PINMUX_IPSR_DATA(IP4_29_27, VI2_R2), 1069 PINMUX_IPSR_DATA(IP4_29_27, VI2_R2),
1071 1070
1072 PINMUX_IPSR_DATA(IP5_2_0, EX_CS3_N), 1071 PINMUX_IPSR_DATA(IP5_2_0, EX_CS3_N),
1073 PINMUX_IPSR_DATA(IP5_2_0, GPS_MAG), 1072 PINMUX_IPSR_DATA(IP5_2_0, GPS_MAG),
1074 PINMUX_IPSR_DATA(IP5_2_0, VI3_FIELD), 1073 PINMUX_IPSR_DATA(IP5_2_0, VI3_FIELD),
1075 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, VI1_G1, SEL_VI1_0), 1074 PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1, SEL_VI1_0),
1076 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, VI1_G1_B, SEL_VI1_1), 1075 PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1_B, SEL_VI1_1),
1077 PINMUX_IPSR_DATA(IP5_2_0, VI2_R3), 1076 PINMUX_IPSR_DATA(IP5_2_0, VI2_R3),
1078 PINMUX_IPSR_DATA(IP5_5_3, EX_CS4_N), 1077 PINMUX_IPSR_DATA(IP5_5_3, EX_CS4_N),
1079 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF1_SCK_B, SEL_SOF1_1), 1078 PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_SCK_B, SEL_SOF1_1),
1080 PINMUX_IPSR_DATA(IP5_5_3, VI3_HSYNC_N), 1079 PINMUX_IPSR_DATA(IP5_5_3, VI3_HSYNC_N),
1081 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, VI2_HSYNC_N, SEL_VI2_0), 1080 PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N, SEL_VI2_0),
1082 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, IIC1_SCL, SEL_IIC1_0), 1081 PINMUX_IPSR_MSEL(IP5_5_3, IIC1_SCL, SEL_IIC1_0),
1083 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, VI2_HSYNC_N_B, SEL_VI2_1), 1082 PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N_B, SEL_VI2_1),
1084 PINMUX_IPSR_DATA(IP5_5_3, INTC_EN0_N), 1083 PINMUX_IPSR_DATA(IP5_5_3, INTC_EN0_N),
1085 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, I2C1_SCL, SEL_I2C1_0), 1084 PINMUX_IPSR_MSEL(IP5_5_3, I2C1_SCL, SEL_I2C1_0),
1086 PINMUX_IPSR_DATA(IP5_9_6, EX_CS5_N), 1085 PINMUX_IPSR_DATA(IP5_9_6, EX_CS5_N),
1087 PINMUX_IPSR_MODSEL_DATA(IP5_9_6, CAN0_RX, SEL_CAN0_0), 1086 PINMUX_IPSR_MSEL(IP5_9_6, CAN0_RX, SEL_CAN0_0),
1088 PINMUX_IPSR_MODSEL_DATA(IP5_9_6, MSIOF1_RXD_B, SEL_SOF1_1), 1087 PINMUX_IPSR_MSEL(IP5_9_6, MSIOF1_RXD_B, SEL_SOF1_1),
1089 PINMUX_IPSR_DATA(IP5_9_6, VI3_VSYNC_N), 1088 PINMUX_IPSR_DATA(IP5_9_6, VI3_VSYNC_N),
1090 PINMUX_IPSR_MODSEL_DATA(IP5_9_6, VI1_G2, SEL_VI1_0), 1089 PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2, SEL_VI1_0),
1091 PINMUX_IPSR_MODSEL_DATA(IP5_9_6, VI1_G2_B, SEL_VI1_1), 1090 PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2_B, SEL_VI1_1),
1092 PINMUX_IPSR_DATA(IP5_9_6, VI2_R4), 1091 PINMUX_IPSR_DATA(IP5_9_6, VI2_R4),
1093 PINMUX_IPSR_MODSEL_DATA(IP5_9_6, IIC1_SDA, SEL_IIC1_0), 1092 PINMUX_IPSR_MSEL(IP5_9_6, IIC1_SDA, SEL_IIC1_0),
1094 PINMUX_IPSR_DATA(IP5_9_6, INTC_EN1_N), 1093 PINMUX_IPSR_DATA(IP5_9_6, INTC_EN1_N),
1095 PINMUX_IPSR_MODSEL_DATA(IP5_9_6, I2C1_SDA, SEL_I2C1_0), 1094 PINMUX_IPSR_MSEL(IP5_9_6, I2C1_SDA, SEL_I2C1_0),
1096 PINMUX_IPSR_DATA(IP5_12_10, BS_N), 1095 PINMUX_IPSR_DATA(IP5_12_10, BS_N),
1097 PINMUX_IPSR_MODSEL_DATA(IP5_12_10, IETX, SEL_IEB_0), 1096 PINMUX_IPSR_MSEL(IP5_12_10, IETX, SEL_IEB_0),
1098 PINMUX_IPSR_MODSEL_DATA(IP5_12_10, HTX1_B, SEL_HSCIF1_1), 1097 PINMUX_IPSR_MSEL(IP5_12_10, HTX1_B, SEL_HSCIF1_1),
1099 PINMUX_IPSR_MODSEL_DATA(IP5_12_10, CAN1_TX, SEL_CAN1_0), 1098 PINMUX_IPSR_MSEL(IP5_12_10, CAN1_TX, SEL_CAN1_0),
1100 PINMUX_IPSR_DATA(IP5_12_10, DRACK0), 1099 PINMUX_IPSR_DATA(IP5_12_10, DRACK0),
1101 PINMUX_IPSR_MODSEL_DATA(IP5_12_10, IETX_C, SEL_IEB_2), 1100 PINMUX_IPSR_MSEL(IP5_12_10, IETX_C, SEL_IEB_2),
1102 PINMUX_IPSR_DATA(IP5_14_13, RD_N), 1101 PINMUX_IPSR_DATA(IP5_14_13, RD_N),
1103 PINMUX_IPSR_MODSEL_DATA(IP5_14_13, CAN0_TX, SEL_CAN0_0), 1102 PINMUX_IPSR_MSEL(IP5_14_13, CAN0_TX, SEL_CAN0_0),
1104 PINMUX_IPSR_MODSEL_DATA(IP5_14_13, SCIFA0_SCK_B, SEL_SCFA_1), 1103 PINMUX_IPSR_MSEL(IP5_14_13, SCIFA0_SCK_B, SEL_SCFA_1),
1105 PINMUX_IPSR_DATA(IP5_17_15, RD_WR_N), 1104 PINMUX_IPSR_DATA(IP5_17_15, RD_WR_N),
1106 PINMUX_IPSR_MODSEL_DATA(IP5_17_15, VI1_G3, SEL_VI1_0), 1105 PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3, SEL_VI1_0),
1107 PINMUX_IPSR_MODSEL_DATA(IP5_17_15, VI1_G3_B, SEL_VI1_1), 1106 PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3_B, SEL_VI1_1),
1108 PINMUX_IPSR_DATA(IP5_17_15, VI2_R5), 1107 PINMUX_IPSR_DATA(IP5_17_15, VI2_R5),
1109 PINMUX_IPSR_MODSEL_DATA(IP5_17_15, SCIFA0_RXD_B, SEL_SCFA_1), 1108 PINMUX_IPSR_MSEL(IP5_17_15, SCIFA0_RXD_B, SEL_SCFA_1),
1110 PINMUX_IPSR_DATA(IP5_17_15, INTC_IRQ4_N), 1109 PINMUX_IPSR_DATA(IP5_17_15, INTC_IRQ4_N),
1111 PINMUX_IPSR_DATA(IP5_20_18, WE0_N), 1110 PINMUX_IPSR_DATA(IP5_20_18, WE0_N),
1112 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, IECLK, SEL_IEB_0), 1111 PINMUX_IPSR_MSEL(IP5_20_18, IECLK, SEL_IEB_0),
1113 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, CAN_CLK, SEL_CANCLK_0), 1112 PINMUX_IPSR_MSEL(IP5_20_18, CAN_CLK, SEL_CANCLK_0),
1114 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, VI2_VSYNC_N, SEL_VI2_0), 1113 PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N, SEL_VI2_0),
1115 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, SCIFA0_TXD_B, SEL_SCFA_1), 1114 PINMUX_IPSR_MSEL(IP5_20_18, SCIFA0_TXD_B, SEL_SCFA_1),
1116 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, VI2_VSYNC_N_B, SEL_VI2_1), 1115 PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N_B, SEL_VI2_1),
1117 PINMUX_IPSR_DATA(IP5_23_21, WE1_N), 1116 PINMUX_IPSR_DATA(IP5_23_21, WE1_N),
1118 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, IERX, SEL_IEB_0), 1117 PINMUX_IPSR_MSEL(IP5_23_21, IERX, SEL_IEB_0),
1119 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, CAN1_RX, SEL_CAN1_0), 1118 PINMUX_IPSR_MSEL(IP5_23_21, CAN1_RX, SEL_CAN1_0),
1120 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, VI1_G4, SEL_VI1_0), 1119 PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4, SEL_VI1_0),
1121 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, VI1_G4_B, SEL_VI1_1), 1120 PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4_B, SEL_VI1_1),
1122 PINMUX_IPSR_DATA(IP5_23_21, VI2_R6), 1121 PINMUX_IPSR_DATA(IP5_23_21, VI2_R6),
1123 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, SCIFA0_CTS_N_B, SEL_SCFA_1), 1122 PINMUX_IPSR_MSEL(IP5_23_21, SCIFA0_CTS_N_B, SEL_SCFA_1),
1124 PINMUX_IPSR_MODSEL_DATA(IP5_23_21, IERX_C, SEL_IEB_2), 1123 PINMUX_IPSR_MSEL(IP5_23_21, IERX_C, SEL_IEB_2),
1125 PINMUX_IPSR_MODSEL_DATA(IP5_26_24, EX_WAIT0, SEL_LBS_0), 1124 PINMUX_IPSR_MSEL(IP5_26_24, EX_WAIT0, SEL_LBS_0),
1126 PINMUX_IPSR_DATA(IP5_26_24, IRQ3), 1125 PINMUX_IPSR_DATA(IP5_26_24, IRQ3),
1127 PINMUX_IPSR_DATA(IP5_26_24, INTC_IRQ3_N), 1126 PINMUX_IPSR_DATA(IP5_26_24, INTC_IRQ3_N),
1128 PINMUX_IPSR_MODSEL_DATA(IP5_26_24, VI3_CLK, SEL_VI3_0), 1127 PINMUX_IPSR_MSEL(IP5_26_24, VI3_CLK, SEL_VI3_0),
1129 PINMUX_IPSR_MODSEL_DATA(IP5_26_24, SCIFA0_RTS_N_B, SEL_SCFA_1), 1128 PINMUX_IPSR_MSEL(IP5_26_24, SCIFA0_RTS_N_B, SEL_SCFA_1),
1130 PINMUX_IPSR_MODSEL_DATA(IP5_26_24, HRX0_B, SEL_HSCIF0_1), 1129 PINMUX_IPSR_MSEL(IP5_26_24, HRX0_B, SEL_HSCIF0_1),
1131 PINMUX_IPSR_MODSEL_DATA(IP5_26_24, MSIOF0_SCK_B, SEL_SOF0_1), 1130 PINMUX_IPSR_MSEL(IP5_26_24, MSIOF0_SCK_B, SEL_SOF0_1),
1132 PINMUX_IPSR_DATA(IP5_29_27, DREQ0_N), 1131 PINMUX_IPSR_DATA(IP5_29_27, DREQ0_N),
1133 PINMUX_IPSR_MODSEL_DATA(IP5_29_27, VI1_HSYNC_N, SEL_VI1_0), 1132 PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N, SEL_VI1_0),
1134 PINMUX_IPSR_MODSEL_DATA(IP5_29_27, VI1_HSYNC_N_B, SEL_VI1_1), 1133 PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N_B, SEL_VI1_1),
1135 PINMUX_IPSR_DATA(IP5_29_27, VI2_R7), 1134 PINMUX_IPSR_DATA(IP5_29_27, VI2_R7),
1136 PINMUX_IPSR_MODSEL_DATA(IP5_29_27, SSI_SCK78_C, SEL_SSI7_2), 1135 PINMUX_IPSR_MSEL(IP5_29_27, SSI_SCK78_C, SEL_SSI7_2),
1137 PINMUX_IPSR_MODSEL_DATA(IP5_29_27, SSI_WS78_B, SEL_SSI7_1), 1136 PINMUX_IPSR_MSEL(IP5_29_27, SSI_WS78_B, SEL_SSI7_1),
1138 1137
1139 PINMUX_IPSR_DATA(IP6_2_0, DACK0), 1138 PINMUX_IPSR_DATA(IP6_2_0, DACK0),
1140 PINMUX_IPSR_DATA(IP6_2_0, IRQ0), 1139 PINMUX_IPSR_DATA(IP6_2_0, IRQ0),
1141 PINMUX_IPSR_DATA(IP6_2_0, INTC_IRQ0_N), 1140 PINMUX_IPSR_DATA(IP6_2_0, INTC_IRQ0_N),
1142 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SSI_SCK6_B, SEL_SSI6_1), 1141 PINMUX_IPSR_MSEL(IP6_2_0, SSI_SCK6_B, SEL_SSI6_1),
1143 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, VI1_VSYNC_N, SEL_VI1_0), 1142 PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N, SEL_VI1_0),
1144 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, VI1_VSYNC_N_B, SEL_VI1_1), 1143 PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N_B, SEL_VI1_1),
1145 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SSI_WS78_C, SEL_SSI7_2), 1144 PINMUX_IPSR_MSEL(IP6_2_0, SSI_WS78_C, SEL_SSI7_2),
1146 PINMUX_IPSR_DATA(IP6_5_3, DREQ1_N), 1145 PINMUX_IPSR_DATA(IP6_5_3, DREQ1_N),
1147 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, VI1_CLKENB, SEL_VI1_0), 1146 PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB, SEL_VI1_0),
1148 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, VI1_CLKENB_B, SEL_VI1_1), 1147 PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB_B, SEL_VI1_1),
1149 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SSI_SDATA7_C, SEL_SSI7_2), 1148 PINMUX_IPSR_MSEL(IP6_5_3, SSI_SDATA7_C, SEL_SSI7_2),
1150 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SSI_SCK78_B, SEL_SSI7_1), 1149 PINMUX_IPSR_MSEL(IP6_5_3, SSI_SCK78_B, SEL_SSI7_1),
1151 PINMUX_IPSR_DATA(IP6_8_6, DACK1), 1150 PINMUX_IPSR_DATA(IP6_8_6, DACK1),
1152 PINMUX_IPSR_DATA(IP6_8_6, IRQ1), 1151 PINMUX_IPSR_DATA(IP6_8_6, IRQ1),
1153 PINMUX_IPSR_DATA(IP6_8_6, INTC_IRQ1_N), 1152 PINMUX_IPSR_DATA(IP6_8_6, INTC_IRQ1_N),
1154 PINMUX_IPSR_MODSEL_DATA(IP6_8_6, SSI_WS6_B, SEL_SSI6_1), 1153 PINMUX_IPSR_MSEL(IP6_8_6, SSI_WS6_B, SEL_SSI6_1),
1155 PINMUX_IPSR_MODSEL_DATA(IP6_8_6, SSI_SDATA8_C, SEL_SSI8_2), 1154 PINMUX_IPSR_MSEL(IP6_8_6, SSI_SDATA8_C, SEL_SSI8_2),
1156 PINMUX_IPSR_DATA(IP6_10_9, DREQ2_N), 1155 PINMUX_IPSR_DATA(IP6_10_9, DREQ2_N),
1157 PINMUX_IPSR_MODSEL_DATA(IP6_10_9, HSCK1_B, SEL_HSCIF1_1), 1156 PINMUX_IPSR_MSEL(IP6_10_9, HSCK1_B, SEL_HSCIF1_1),
1158 PINMUX_IPSR_MODSEL_DATA(IP6_10_9, HCTS0_N_B, SEL_HSCIF0_1), 1157 PINMUX_IPSR_MSEL(IP6_10_9, HCTS0_N_B, SEL_HSCIF0_1),
1159 PINMUX_IPSR_MODSEL_DATA(IP6_10_9, MSIOF0_TXD_B, SEL_SOF0_1), 1158 PINMUX_IPSR_MSEL(IP6_10_9, MSIOF0_TXD_B, SEL_SOF0_1),
1160 PINMUX_IPSR_DATA(IP6_13_11, DACK2), 1159 PINMUX_IPSR_DATA(IP6_13_11, DACK2),
1161 PINMUX_IPSR_DATA(IP6_13_11, IRQ2), 1160 PINMUX_IPSR_DATA(IP6_13_11, IRQ2),
1162 PINMUX_IPSR_DATA(IP6_13_11, INTC_IRQ2_N), 1161 PINMUX_IPSR_DATA(IP6_13_11, INTC_IRQ2_N),
1163 PINMUX_IPSR_MODSEL_DATA(IP6_13_11, SSI_SDATA6_B, SEL_SSI6_1), 1162 PINMUX_IPSR_MSEL(IP6_13_11, SSI_SDATA6_B, SEL_SSI6_1),
1164 PINMUX_IPSR_MODSEL_DATA(IP6_13_11, HRTS0_N_B, SEL_HSCIF0_1), 1163 PINMUX_IPSR_MSEL(IP6_13_11, HRTS0_N_B, SEL_HSCIF0_1),
1165 PINMUX_IPSR_MODSEL_DATA(IP6_13_11, MSIOF0_RXD_B, SEL_SOF0_1), 1164 PINMUX_IPSR_MSEL(IP6_13_11, MSIOF0_RXD_B, SEL_SOF0_1),
1166 PINMUX_IPSR_DATA(IP6_16_14, ETH_CRS_DV), 1165 PINMUX_IPSR_DATA(IP6_16_14, ETH_CRS_DV),
1167 PINMUX_IPSR_MODSEL_DATA(IP6_16_14, STP_ISCLK_0_B, SEL_SSP_1), 1166 PINMUX_IPSR_MSEL(IP6_16_14, STP_ISCLK_0_B, SEL_SSP_1),
1168 PINMUX_IPSR_MODSEL_DATA(IP6_16_14, TS_SDEN0_D, SEL_TSIF0_3), 1167 PINMUX_IPSR_MSEL(IP6_16_14, TS_SDEN0_D, SEL_TSIF0_3),
1169 PINMUX_IPSR_MODSEL_DATA(IP6_16_14, GLO_Q0_C, SEL_GPS_2), 1168 PINMUX_IPSR_MSEL(IP6_16_14, GLO_Q0_C, SEL_GPS_2),
1170 PINMUX_IPSR_MODSEL_DATA(IP6_16_14, IIC2_SCL_E, SEL_IIC2_4), 1169 PINMUX_IPSR_MSEL(IP6_16_14, IIC2_SCL_E, SEL_IIC2_4),
1171 PINMUX_IPSR_MODSEL_DATA(IP6_16_14, I2C2_SCL_E, SEL_I2C2_4), 1170 PINMUX_IPSR_MSEL(IP6_16_14, I2C2_SCL_E, SEL_I2C2_4),
1172 PINMUX_IPSR_DATA(IP6_19_17, ETH_RX_ER), 1171 PINMUX_IPSR_DATA(IP6_19_17, ETH_RX_ER),
1173 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, STP_ISD_0_B, SEL_SSP_1), 1172 PINMUX_IPSR_MSEL(IP6_19_17, STP_ISD_0_B, SEL_SSP_1),
1174 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, TS_SPSYNC0_D, SEL_TSIF0_3), 1173 PINMUX_IPSR_MSEL(IP6_19_17, TS_SPSYNC0_D, SEL_TSIF0_3),
1175 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, GLO_Q1_C, SEL_GPS_2), 1174 PINMUX_IPSR_MSEL(IP6_19_17, GLO_Q1_C, SEL_GPS_2),
1176 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, IIC2_SDA_E, SEL_IIC2_4), 1175 PINMUX_IPSR_MSEL(IP6_19_17, IIC2_SDA_E, SEL_IIC2_4),
1177 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, I2C2_SDA_E, SEL_I2C2_4), 1176 PINMUX_IPSR_MSEL(IP6_19_17, I2C2_SDA_E, SEL_I2C2_4),
1178 PINMUX_IPSR_DATA(IP6_22_20, ETH_RXD0), 1177 PINMUX_IPSR_DATA(IP6_22_20, ETH_RXD0),
1179 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, STP_ISEN_0_B, SEL_SSP_1), 1178 PINMUX_IPSR_MSEL(IP6_22_20, STP_ISEN_0_B, SEL_SSP_1),
1180 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, TS_SDAT0_D, SEL_TSIF0_3), 1179 PINMUX_IPSR_MSEL(IP6_22_20, TS_SDAT0_D, SEL_TSIF0_3),
1181 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, GLO_I0_C, SEL_GPS_2), 1180 PINMUX_IPSR_MSEL(IP6_22_20, GLO_I0_C, SEL_GPS_2),
1182 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCIFB1_SCK_G, SEL_SCIFB1_6), 1181 PINMUX_IPSR_MSEL(IP6_22_20, SCIFB1_SCK_G, SEL_SCIFB1_6),
1183 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCK1_E, SEL_SCIF1_4), 1182 PINMUX_IPSR_MSEL(IP6_22_20, SCK1_E, SEL_SCIF1_4),
1184 PINMUX_IPSR_DATA(IP6_25_23, ETH_RXD1), 1183 PINMUX_IPSR_DATA(IP6_25_23, ETH_RXD1),
1185 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, HRX0_E, SEL_HSCIF0_4), 1184 PINMUX_IPSR_MSEL(IP6_25_23, HRX0_E, SEL_HSCIF0_4),
1186 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, STP_ISSYNC_0_B, SEL_SSP_1), 1185 PINMUX_IPSR_MSEL(IP6_25_23, STP_ISSYNC_0_B, SEL_SSP_1),
1187 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, TS_SCK0_D, SEL_TSIF0_3), 1186 PINMUX_IPSR_MSEL(IP6_25_23, TS_SCK0_D, SEL_TSIF0_3),
1188 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, GLO_I1_C, SEL_GPS_2), 1187 PINMUX_IPSR_MSEL(IP6_25_23, GLO_I1_C, SEL_GPS_2),
1189 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, SCIFB1_RXD_G, SEL_SCIFB1_6), 1188 PINMUX_IPSR_MSEL(IP6_25_23, SCIFB1_RXD_G, SEL_SCIFB1_6),
1190 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, RX1_E, SEL_SCIF1_4), 1189 PINMUX_IPSR_MSEL(IP6_25_23, RX1_E, SEL_SCIF1_4),
1191 PINMUX_IPSR_DATA(IP6_28_26, ETH_LINK), 1190 PINMUX_IPSR_DATA(IP6_28_26, ETH_LINK),
1192 PINMUX_IPSR_MODSEL_DATA(IP6_28_26, HTX0_E, SEL_HSCIF0_4), 1191 PINMUX_IPSR_MSEL(IP6_28_26, HTX0_E, SEL_HSCIF0_4),
1193 PINMUX_IPSR_MODSEL_DATA(IP6_28_26, STP_IVCXO27_0_B, SEL_SSP_1), 1192 PINMUX_IPSR_MSEL(IP6_28_26, STP_IVCXO27_0_B, SEL_SSP_1),
1194 PINMUX_IPSR_MODSEL_DATA(IP6_28_26, SCIFB1_TXD_G, SEL_SCIFB1_6), 1193 PINMUX_IPSR_MSEL(IP6_28_26, SCIFB1_TXD_G, SEL_SCIFB1_6),
1195 PINMUX_IPSR_MODSEL_DATA(IP6_28_26, TX1_E, SEL_SCIF1_4), 1194 PINMUX_IPSR_MSEL(IP6_28_26, TX1_E, SEL_SCIF1_4),
1196 PINMUX_IPSR_DATA(IP6_31_29, ETH_REF_CLK), 1195 PINMUX_IPSR_DATA(IP6_31_29, ETH_REF_CLK),
1197 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, HCTS0_N_E, SEL_HSCIF0_4), 1196 PINMUX_IPSR_MSEL(IP6_31_29, HCTS0_N_E, SEL_HSCIF0_4),
1198 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, STP_IVCXO27_1_B, SEL_SSP_1), 1197 PINMUX_IPSR_MSEL(IP6_31_29, STP_IVCXO27_1_B, SEL_SSP_1),
1199 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, HRX0_F, SEL_HSCIF0_5), 1198 PINMUX_IPSR_MSEL(IP6_31_29, HRX0_F, SEL_HSCIF0_5),
1200 1199
1201 PINMUX_IPSR_DATA(IP7_2_0, ETH_MDIO), 1200 PINMUX_IPSR_DATA(IP7_2_0, ETH_MDIO),
1202 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, HRTS0_N_E, SEL_HSCIF0_4), 1201 PINMUX_IPSR_MSEL(IP7_2_0, HRTS0_N_E, SEL_HSCIF0_4),
1203 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, SIM0_D_C, SEL_SIM_2), 1202 PINMUX_IPSR_MSEL(IP7_2_0, SIM0_D_C, SEL_SIM_2),
1204 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, HCTS0_N_F, SEL_HSCIF0_5), 1203 PINMUX_IPSR_MSEL(IP7_2_0, HCTS0_N_F, SEL_HSCIF0_5),
1205 PINMUX_IPSR_DATA(IP7_5_3, ETH_TXD1), 1204 PINMUX_IPSR_DATA(IP7_5_3, ETH_TXD1),
1206 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, HTX0_F, SEL_HSCIF0_5), 1205 PINMUX_IPSR_MSEL(IP7_5_3, HTX0_F, SEL_HSCIF0_5),
1207 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, BPFCLK_G, SEL_FM_6), 1206 PINMUX_IPSR_MSEL(IP7_5_3, BPFCLK_G, SEL_FM_6),
1208 PINMUX_IPSR_DATA(IP7_7_6, ETH_TX_EN), 1207 PINMUX_IPSR_DATA(IP7_7_6, ETH_TX_EN),
1209 PINMUX_IPSR_MODSEL_DATA(IP7_7_6, SIM0_CLK_C, SEL_SIM_2), 1208 PINMUX_IPSR_MSEL(IP7_7_6, SIM0_CLK_C, SEL_SIM_2),
1210 PINMUX_IPSR_MODSEL_DATA(IP7_7_6, HRTS0_N_F, SEL_HSCIF0_5), 1209 PINMUX_IPSR_MSEL(IP7_7_6, HRTS0_N_F, SEL_HSCIF0_5),
1211 PINMUX_IPSR_DATA(IP7_9_8, ETH_MAGIC), 1210 PINMUX_IPSR_DATA(IP7_9_8, ETH_MAGIC),
1212 PINMUX_IPSR_MODSEL_DATA(IP7_9_8, SIM0_RST_C, SEL_SIM_2), 1211 PINMUX_IPSR_MSEL(IP7_9_8, SIM0_RST_C, SEL_SIM_2),
1213 PINMUX_IPSR_DATA(IP7_12_10, ETH_TXD0), 1212 PINMUX_IPSR_DATA(IP7_12_10, ETH_TXD0),
1214 PINMUX_IPSR_MODSEL_DATA(IP7_12_10, STP_ISCLK_1_B, SEL_SSP_1), 1213 PINMUX_IPSR_MSEL(IP7_12_10, STP_ISCLK_1_B, SEL_SSP_1),
1215 PINMUX_IPSR_MODSEL_DATA(IP7_12_10, TS_SDEN1_C, SEL_TSIF1_2), 1214 PINMUX_IPSR_MSEL(IP7_12_10, TS_SDEN1_C, SEL_TSIF1_2),
1216 PINMUX_IPSR_MODSEL_DATA(IP7_12_10, GLO_SCLK_C, SEL_GPS_2), 1215 PINMUX_IPSR_MSEL(IP7_12_10, GLO_SCLK_C, SEL_GPS_2),
1217 PINMUX_IPSR_DATA(IP7_15_13, ETH_MDC), 1216 PINMUX_IPSR_DATA(IP7_15_13, ETH_MDC),
1218 PINMUX_IPSR_MODSEL_DATA(IP7_15_13, STP_ISD_1_B, SEL_SSP_1), 1217 PINMUX_IPSR_MSEL(IP7_15_13, STP_ISD_1_B, SEL_SSP_1),
1219 PINMUX_IPSR_MODSEL_DATA(IP7_15_13, TS_SPSYNC1_C, SEL_TSIF1_2), 1218 PINMUX_IPSR_MSEL(IP7_15_13, TS_SPSYNC1_C, SEL_TSIF1_2),
1220 PINMUX_IPSR_MODSEL_DATA(IP7_15_13, GLO_SDATA_C, SEL_GPS_2), 1219 PINMUX_IPSR_MSEL(IP7_15_13, GLO_SDATA_C, SEL_GPS_2),
1221 PINMUX_IPSR_DATA(IP7_18_16, PWM0), 1220 PINMUX_IPSR_DATA(IP7_18_16, PWM0),
1222 PINMUX_IPSR_MODSEL_DATA(IP7_18_16, SCIFA2_SCK_C, SEL_SCIFA2_2), 1221 PINMUX_IPSR_MSEL(IP7_18_16, SCIFA2_SCK_C, SEL_SCIFA2_2),
1223 PINMUX_IPSR_MODSEL_DATA(IP7_18_16, STP_ISEN_1_B, SEL_SSP_1), 1222 PINMUX_IPSR_MSEL(IP7_18_16, STP_ISEN_1_B, SEL_SSP_1),
1224 PINMUX_IPSR_MODSEL_DATA(IP7_18_16, TS_SDAT1_C, SEL_TSIF1_2), 1223 PINMUX_IPSR_MSEL(IP7_18_16, TS_SDAT1_C, SEL_TSIF1_2),
1225 PINMUX_IPSR_MODSEL_DATA(IP7_18_16, GLO_SS_C, SEL_GPS_2), 1224 PINMUX_IPSR_MSEL(IP7_18_16, GLO_SS_C, SEL_GPS_2),
1226 PINMUX_IPSR_DATA(IP7_21_19, PWM1), 1225 PINMUX_IPSR_DATA(IP7_21_19, PWM1),
1227 PINMUX_IPSR_MODSEL_DATA(IP7_21_19, SCIFA2_TXD_C, SEL_SCIFA2_2), 1226 PINMUX_IPSR_MSEL(IP7_21_19, SCIFA2_TXD_C, SEL_SCIFA2_2),
1228 PINMUX_IPSR_MODSEL_DATA(IP7_21_19, STP_ISSYNC_1_B, SEL_SSP_1), 1227 PINMUX_IPSR_MSEL(IP7_21_19, STP_ISSYNC_1_B, SEL_SSP_1),
1229 PINMUX_IPSR_MODSEL_DATA(IP7_21_19, TS_SCK1_C, SEL_TSIF1_2), 1228 PINMUX_IPSR_MSEL(IP7_21_19, TS_SCK1_C, SEL_TSIF1_2),
1230 PINMUX_IPSR_MODSEL_DATA(IP7_21_19, GLO_RFON_C, SEL_GPS_2), 1229 PINMUX_IPSR_MSEL(IP7_21_19, GLO_RFON_C, SEL_GPS_2),
1231 PINMUX_IPSR_DATA(IP7_21_19, PCMOE_N), 1230 PINMUX_IPSR_DATA(IP7_21_19, PCMOE_N),
1232 PINMUX_IPSR_DATA(IP7_24_22, PWM2), 1231 PINMUX_IPSR_DATA(IP7_24_22, PWM2),
1233 PINMUX_IPSR_DATA(IP7_24_22, PWMFSW0), 1232 PINMUX_IPSR_DATA(IP7_24_22, PWMFSW0),
1234 PINMUX_IPSR_MODSEL_DATA(IP7_24_22, SCIFA2_RXD_C, SEL_SCIFA2_2), 1233 PINMUX_IPSR_MSEL(IP7_24_22, SCIFA2_RXD_C, SEL_SCIFA2_2),
1235 PINMUX_IPSR_DATA(IP7_24_22, PCMWE_N), 1234 PINMUX_IPSR_DATA(IP7_24_22, PCMWE_N),
1236 PINMUX_IPSR_MODSEL_DATA(IP7_24_22, IECLK_C, SEL_IEB_2), 1235 PINMUX_IPSR_MSEL(IP7_24_22, IECLK_C, SEL_IEB_2),
1237 PINMUX_IPSR_DATA(IP7_26_25, DU_DOTCLKIN1), 1236 PINMUX_IPSR_DATA(IP7_26_25, DU_DOTCLKIN1),
1238 PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKC), 1237 PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKC),
1239 PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKOUT_C), 1238 PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKOUT_C),
1240 PINMUX_IPSR_MODSEL_DATA(IP7_28_27, VI0_CLK, SEL_VI0_0), 1239 PINMUX_IPSR_MSEL(IP7_28_27, VI0_CLK, SEL_VI0_0),
1241 PINMUX_IPSR_DATA(IP7_28_27, ATACS00_N), 1240 PINMUX_IPSR_DATA(IP7_28_27, ATACS00_N),
1242 PINMUX_IPSR_DATA(IP7_28_27, AVB_RXD1), 1241 PINMUX_IPSR_DATA(IP7_28_27, AVB_RXD1),
1243 PINMUX_IPSR_MODSEL_DATA(IP7_30_29, VI0_DATA0_VI0_B0, SEL_VI0_0), 1242 PINMUX_IPSR_MSEL(IP7_30_29, VI0_DATA0_VI0_B0, SEL_VI0_0),
1244 PINMUX_IPSR_DATA(IP7_30_29, ATACS10_N), 1243 PINMUX_IPSR_DATA(IP7_30_29, ATACS10_N),
1245 PINMUX_IPSR_DATA(IP7_30_29, AVB_RXD2), 1244 PINMUX_IPSR_DATA(IP7_30_29, AVB_RXD2),
1246 1245
1247 PINMUX_IPSR_MODSEL_DATA(IP8_1_0, VI0_DATA1_VI0_B1, SEL_VI0_0), 1246 PINMUX_IPSR_MSEL(IP8_1_0, VI0_DATA1_VI0_B1, SEL_VI0_0),
1248 PINMUX_IPSR_DATA(IP8_1_0, ATARD0_N), 1247 PINMUX_IPSR_DATA(IP8_1_0, ATARD0_N),
1249 PINMUX_IPSR_DATA(IP8_1_0, AVB_RXD3), 1248 PINMUX_IPSR_DATA(IP8_1_0, AVB_RXD3),
1250 PINMUX_IPSR_MODSEL_DATA(IP8_3_2, VI0_DATA2_VI0_B2, SEL_VI0_0), 1249 PINMUX_IPSR_MSEL(IP8_3_2, VI0_DATA2_VI0_B2, SEL_VI0_0),
1251 PINMUX_IPSR_DATA(IP8_3_2, ATAWR0_N), 1250 PINMUX_IPSR_DATA(IP8_3_2, ATAWR0_N),
1252 PINMUX_IPSR_DATA(IP8_3_2, AVB_RXD4), 1251 PINMUX_IPSR_DATA(IP8_3_2, AVB_RXD4),
1253 PINMUX_IPSR_MODSEL_DATA(IP8_5_4, VI0_DATA3_VI0_B3, SEL_VI0_0), 1252 PINMUX_IPSR_MSEL(IP8_5_4, VI0_DATA3_VI0_B3, SEL_VI0_0),
1254 PINMUX_IPSR_DATA(IP8_5_4, ATADIR0_N), 1253 PINMUX_IPSR_DATA(IP8_5_4, ATADIR0_N),
1255 PINMUX_IPSR_DATA(IP8_5_4, AVB_RXD5), 1254 PINMUX_IPSR_DATA(IP8_5_4, AVB_RXD5),
1256 PINMUX_IPSR_MODSEL_DATA(IP8_7_6, VI0_DATA4_VI0_B4, SEL_VI0_0), 1255 PINMUX_IPSR_MSEL(IP8_7_6, VI0_DATA4_VI0_B4, SEL_VI0_0),
1257 PINMUX_IPSR_DATA(IP8_7_6, ATAG0_N), 1256 PINMUX_IPSR_DATA(IP8_7_6, ATAG0_N),
1258 PINMUX_IPSR_DATA(IP8_7_6, AVB_RXD6), 1257 PINMUX_IPSR_DATA(IP8_7_6, AVB_RXD6),
1259 PINMUX_IPSR_MODSEL_DATA(IP8_9_8, VI0_DATA5_VI0_B5, SEL_VI0_0), 1258 PINMUX_IPSR_MSEL(IP8_9_8, VI0_DATA5_VI0_B5, SEL_VI0_0),
1260 PINMUX_IPSR_DATA(IP8_9_8, EX_WAIT1), 1259 PINMUX_IPSR_DATA(IP8_9_8, EX_WAIT1),
1261 PINMUX_IPSR_DATA(IP8_9_8, AVB_RXD7), 1260 PINMUX_IPSR_DATA(IP8_9_8, AVB_RXD7),
1262 PINMUX_IPSR_MODSEL_DATA(IP8_11_10, VI0_DATA6_VI0_B6, SEL_VI0_0), 1261 PINMUX_IPSR_MSEL(IP8_11_10, VI0_DATA6_VI0_B6, SEL_VI0_0),
1263 PINMUX_IPSR_DATA(IP8_11_10, AVB_RX_ER), 1262 PINMUX_IPSR_DATA(IP8_11_10, AVB_RX_ER),
1264 PINMUX_IPSR_MODSEL_DATA(IP8_13_12, VI0_DATA7_VI0_B7, SEL_VI0_0), 1263 PINMUX_IPSR_MSEL(IP8_13_12, VI0_DATA7_VI0_B7, SEL_VI0_0),
1265 PINMUX_IPSR_DATA(IP8_13_12, AVB_RX_CLK), 1264 PINMUX_IPSR_DATA(IP8_13_12, AVB_RX_CLK),
1266 PINMUX_IPSR_MODSEL_DATA(IP8_15_14, VI1_CLK, SEL_VI1_0), 1265 PINMUX_IPSR_MSEL(IP8_15_14, VI1_CLK, SEL_VI1_0),
1267 PINMUX_IPSR_DATA(IP8_15_14, AVB_RX_DV), 1266 PINMUX_IPSR_DATA(IP8_15_14, AVB_RX_DV),
1268 PINMUX_IPSR_MODSEL_DATA(IP8_17_16, VI1_DATA0_VI1_B0, SEL_VI1_0), 1267 PINMUX_IPSR_MSEL(IP8_17_16, VI1_DATA0_VI1_B0, SEL_VI1_0),
1269 PINMUX_IPSR_MODSEL_DATA(IP8_17_16, SCIFA1_SCK_D, SEL_SCIFA1_3), 1268 PINMUX_IPSR_MSEL(IP8_17_16, SCIFA1_SCK_D, SEL_SCIFA1_3),
1270 PINMUX_IPSR_DATA(IP8_17_16, AVB_CRS), 1269 PINMUX_IPSR_DATA(IP8_17_16, AVB_CRS),
1271 PINMUX_IPSR_MODSEL_DATA(IP8_19_18, VI1_DATA1_VI1_B1, SEL_VI1_0), 1270 PINMUX_IPSR_MSEL(IP8_19_18, VI1_DATA1_VI1_B1, SEL_VI1_0),
1272 PINMUX_IPSR_MODSEL_DATA(IP8_19_18, SCIFA1_RXD_D, SEL_SCIFA1_3), 1271 PINMUX_IPSR_MSEL(IP8_19_18, SCIFA1_RXD_D, SEL_SCIFA1_3),
1273 PINMUX_IPSR_DATA(IP8_19_18, AVB_MDC), 1272 PINMUX_IPSR_DATA(IP8_19_18, AVB_MDC),
1274 PINMUX_IPSR_MODSEL_DATA(IP8_21_20, VI1_DATA2_VI1_B2, SEL_VI1_0), 1273 PINMUX_IPSR_MSEL(IP8_21_20, VI1_DATA2_VI1_B2, SEL_VI1_0),
1275 PINMUX_IPSR_MODSEL_DATA(IP8_21_20, SCIFA1_TXD_D, SEL_SCIFA1_3), 1274 PINMUX_IPSR_MSEL(IP8_21_20, SCIFA1_TXD_D, SEL_SCIFA1_3),
1276 PINMUX_IPSR_DATA(IP8_21_20, AVB_MDIO), 1275 PINMUX_IPSR_DATA(IP8_21_20, AVB_MDIO),
1277 PINMUX_IPSR_MODSEL_DATA(IP8_23_22, VI1_DATA3_VI1_B3, SEL_VI1_0), 1276 PINMUX_IPSR_MSEL(IP8_23_22, VI1_DATA3_VI1_B3, SEL_VI1_0),
1278 PINMUX_IPSR_MODSEL_DATA(IP8_23_22, SCIFA1_CTS_N_D, SEL_SCIFA1_3), 1277 PINMUX_IPSR_MSEL(IP8_23_22, SCIFA1_CTS_N_D, SEL_SCIFA1_3),
1279 PINMUX_IPSR_DATA(IP8_23_22, AVB_GTX_CLK), 1278 PINMUX_IPSR_DATA(IP8_23_22, AVB_GTX_CLK),
1280 PINMUX_IPSR_MODSEL_DATA(IP8_25_24, VI1_DATA4_VI1_B4, SEL_VI1_0), 1279 PINMUX_IPSR_MSEL(IP8_25_24, VI1_DATA4_VI1_B4, SEL_VI1_0),
1281 PINMUX_IPSR_MODSEL_DATA(IP8_25_24, SCIFA1_RTS_N_D, SEL_SCIFA1_3), 1280 PINMUX_IPSR_MSEL(IP8_25_24, SCIFA1_RTS_N_D, SEL_SCIFA1_3),
1282 PINMUX_IPSR_DATA(IP8_25_24, AVB_MAGIC), 1281 PINMUX_IPSR_DATA(IP8_25_24, AVB_MAGIC),
1283 PINMUX_IPSR_MODSEL_DATA(IP8_26, VI1_DATA5_VI1_B5, SEL_VI1_0), 1282 PINMUX_IPSR_MSEL(IP8_26, VI1_DATA5_VI1_B5, SEL_VI1_0),
1284 PINMUX_IPSR_DATA(IP8_26, AVB_PHY_INT), 1283 PINMUX_IPSR_DATA(IP8_26, AVB_PHY_INT),
1285 PINMUX_IPSR_MODSEL_DATA(IP8_27, VI1_DATA6_VI1_B6, SEL_VI1_0), 1284 PINMUX_IPSR_MSEL(IP8_27, VI1_DATA6_VI1_B6, SEL_VI1_0),
1286 PINMUX_IPSR_DATA(IP8_27, AVB_GTXREFCLK), 1285 PINMUX_IPSR_DATA(IP8_27, AVB_GTXREFCLK),
1287 PINMUX_IPSR_DATA(IP8_28, SD0_CLK), 1286 PINMUX_IPSR_DATA(IP8_28, SD0_CLK),
1288 PINMUX_IPSR_MODSEL_DATA(IP8_28, VI1_DATA0_VI1_B0_B, SEL_VI1_1), 1287 PINMUX_IPSR_MSEL(IP8_28, VI1_DATA0_VI1_B0_B, SEL_VI1_1),
1289 PINMUX_IPSR_DATA(IP8_30_29, SD0_CMD), 1288 PINMUX_IPSR_DATA(IP8_30_29, SD0_CMD),
1290 PINMUX_IPSR_MODSEL_DATA(IP8_30_29, SCIFB1_SCK_B, SEL_SCIFB1_1), 1289 PINMUX_IPSR_MSEL(IP8_30_29, SCIFB1_SCK_B, SEL_SCIFB1_1),
1291 PINMUX_IPSR_MODSEL_DATA(IP8_30_29, VI1_DATA1_VI1_B1_B, SEL_VI1_1), 1290 PINMUX_IPSR_MSEL(IP8_30_29, VI1_DATA1_VI1_B1_B, SEL_VI1_1),
1292 1291
1293 PINMUX_IPSR_DATA(IP9_1_0, SD0_DAT0), 1292 PINMUX_IPSR_DATA(IP9_1_0, SD0_DAT0),
1294 PINMUX_IPSR_MODSEL_DATA(IP9_1_0, SCIFB1_RXD_B, SEL_SCIFB1_1), 1293 PINMUX_IPSR_MSEL(IP9_1_0, SCIFB1_RXD_B, SEL_SCIFB1_1),
1295 PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI1_DATA2_VI1_B2_B, SEL_VI1_1), 1294 PINMUX_IPSR_MSEL(IP9_1_0, VI1_DATA2_VI1_B2_B, SEL_VI1_1),
1296 PINMUX_IPSR_DATA(IP9_3_2, SD0_DAT1), 1295 PINMUX_IPSR_DATA(IP9_3_2, SD0_DAT1),
1297 PINMUX_IPSR_MODSEL_DATA(IP9_3_2, SCIFB1_TXD_B, SEL_SCIFB1_1), 1296 PINMUX_IPSR_MSEL(IP9_3_2, SCIFB1_TXD_B, SEL_SCIFB1_1),
1298 PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI1_DATA3_VI1_B3_B, SEL_VI1_1), 1297 PINMUX_IPSR_MSEL(IP9_3_2, VI1_DATA3_VI1_B3_B, SEL_VI1_1),
1299 PINMUX_IPSR_DATA(IP9_5_4, SD0_DAT2), 1298 PINMUX_IPSR_DATA(IP9_5_4, SD0_DAT2),
1300 PINMUX_IPSR_MODSEL_DATA(IP9_5_4, SCIFB1_CTS_N_B, SEL_SCIFB1_1), 1299 PINMUX_IPSR_MSEL(IP9_5_4, SCIFB1_CTS_N_B, SEL_SCIFB1_1),
1301 PINMUX_IPSR_MODSEL_DATA(IP9_5_4, VI1_DATA4_VI1_B4_B, SEL_VI1_1), 1300 PINMUX_IPSR_MSEL(IP9_5_4, VI1_DATA4_VI1_B4_B, SEL_VI1_1),
1302 PINMUX_IPSR_DATA(IP9_7_6, SD0_DAT3), 1301 PINMUX_IPSR_DATA(IP9_7_6, SD0_DAT3),
1303 PINMUX_IPSR_MODSEL_DATA(IP9_7_6, SCIFB1_RTS_N_B, SEL_SCIFB1_1), 1302 PINMUX_IPSR_MSEL(IP9_7_6, SCIFB1_RTS_N_B, SEL_SCIFB1_1),
1304 PINMUX_IPSR_MODSEL_DATA(IP9_7_6, VI1_DATA5_VI1_B5_B, SEL_VI1_1), 1303 PINMUX_IPSR_MSEL(IP9_7_6, VI1_DATA5_VI1_B5_B, SEL_VI1_1),
1305 PINMUX_IPSR_DATA(IP9_11_8, SD0_CD), 1304 PINMUX_IPSR_DATA(IP9_11_8, SD0_CD),
1306 PINMUX_IPSR_DATA(IP9_11_8, MMC0_D6), 1305 PINMUX_IPSR_DATA(IP9_11_8, MMC0_D6),
1307 PINMUX_IPSR_MODSEL_DATA(IP9_11_8, TS_SDEN0_B, SEL_TSIF0_1), 1306 PINMUX_IPSR_MSEL(IP9_11_8, TS_SDEN0_B, SEL_TSIF0_1),
1308 PINMUX_IPSR_DATA(IP9_11_8, USB0_EXTP), 1307 PINMUX_IPSR_DATA(IP9_11_8, USB0_EXTP),
1309 PINMUX_IPSR_MODSEL_DATA(IP9_11_8, GLO_SCLK, SEL_GPS_0), 1308 PINMUX_IPSR_MSEL(IP9_11_8, GLO_SCLK, SEL_GPS_0),
1310 PINMUX_IPSR_MODSEL_DATA(IP9_11_8, VI1_DATA6_VI1_B6_B, SEL_VI1_1), 1309 PINMUX_IPSR_MSEL(IP9_11_8, VI1_DATA6_VI1_B6_B, SEL_VI1_1),
1311 PINMUX_IPSR_MODSEL_DATA(IP9_11_8, IIC1_SCL_B, SEL_IIC1_1), 1310 PINMUX_IPSR_MSEL(IP9_11_8, IIC1_SCL_B, SEL_IIC1_1),
1312 PINMUX_IPSR_MODSEL_DATA(IP9_11_8, I2C1_SCL_B, SEL_I2C1_1), 1311 PINMUX_IPSR_MSEL(IP9_11_8, I2C1_SCL_B, SEL_I2C1_1),
1313 PINMUX_IPSR_MODSEL_DATA(IP9_11_8, VI2_DATA6_VI2_B6_B, SEL_VI2_1), 1312 PINMUX_IPSR_MSEL(IP9_11_8, VI2_DATA6_VI2_B6_B, SEL_VI2_1),
1314 PINMUX_IPSR_DATA(IP9_15_12, SD0_WP), 1313 PINMUX_IPSR_DATA(IP9_15_12, SD0_WP),
1315 PINMUX_IPSR_DATA(IP9_15_12, MMC0_D7), 1314 PINMUX_IPSR_DATA(IP9_15_12, MMC0_D7),
1316 PINMUX_IPSR_MODSEL_DATA(IP9_15_12, TS_SPSYNC0_B, SEL_TSIF0_1), 1315 PINMUX_IPSR_MSEL(IP9_15_12, TS_SPSYNC0_B, SEL_TSIF0_1),
1317 PINMUX_IPSR_DATA(IP9_15_12, USB0_IDIN), 1316 PINMUX_IPSR_DATA(IP9_15_12, USB0_IDIN),
1318 PINMUX_IPSR_MODSEL_DATA(IP9_15_12, GLO_SDATA, SEL_GPS_0), 1317 PINMUX_IPSR_MSEL(IP9_15_12, GLO_SDATA, SEL_GPS_0),
1319 PINMUX_IPSR_MODSEL_DATA(IP9_15_12, VI1_DATA7_VI1_B7_B, SEL_VI1_1), 1318 PINMUX_IPSR_MSEL(IP9_15_12, VI1_DATA7_VI1_B7_B, SEL_VI1_1),
1320 PINMUX_IPSR_MODSEL_DATA(IP9_15_12, IIC1_SDA_B, SEL_IIC1_1), 1319 PINMUX_IPSR_MSEL(IP9_15_12, IIC1_SDA_B, SEL_IIC1_1),
1321 PINMUX_IPSR_MODSEL_DATA(IP9_15_12, I2C1_SDA_B, SEL_I2C1_1), 1320 PINMUX_IPSR_MSEL(IP9_15_12, I2C1_SDA_B, SEL_I2C1_1),
1322 PINMUX_IPSR_MODSEL_DATA(IP9_15_12, VI2_DATA7_VI2_B7_B, SEL_VI2_1), 1321 PINMUX_IPSR_MSEL(IP9_15_12, VI2_DATA7_VI2_B7_B, SEL_VI2_1),
1323 PINMUX_IPSR_DATA(IP9_17_16, SD1_CLK), 1322 PINMUX_IPSR_DATA(IP9_17_16, SD1_CLK),
1324 PINMUX_IPSR_DATA(IP9_17_16, AVB_TX_EN), 1323 PINMUX_IPSR_DATA(IP9_17_16, AVB_TX_EN),
1325 PINMUX_IPSR_DATA(IP9_19_18, SD1_CMD), 1324 PINMUX_IPSR_DATA(IP9_19_18, SD1_CMD),
1326 PINMUX_IPSR_DATA(IP9_19_18, AVB_TX_ER), 1325 PINMUX_IPSR_DATA(IP9_19_18, AVB_TX_ER),
1327 PINMUX_IPSR_MODSEL_DATA(IP9_19_18, SCIFB0_SCK_B, SEL_SCIFB_1), 1326 PINMUX_IPSR_MSEL(IP9_19_18, SCIFB0_SCK_B, SEL_SCIFB_1),
1328 PINMUX_IPSR_DATA(IP9_21_20, SD1_DAT0), 1327 PINMUX_IPSR_DATA(IP9_21_20, SD1_DAT0),
1329 PINMUX_IPSR_DATA(IP9_21_20, AVB_TX_CLK), 1328 PINMUX_IPSR_DATA(IP9_21_20, AVB_TX_CLK),
1330 PINMUX_IPSR_MODSEL_DATA(IP9_21_20, SCIFB0_RXD_B, SEL_SCIFB_1), 1329 PINMUX_IPSR_MSEL(IP9_21_20, SCIFB0_RXD_B, SEL_SCIFB_1),
1331 PINMUX_IPSR_DATA(IP9_23_22, SD1_DAT1), 1330 PINMUX_IPSR_DATA(IP9_23_22, SD1_DAT1),
1332 PINMUX_IPSR_DATA(IP9_23_22, AVB_LINK), 1331 PINMUX_IPSR_DATA(IP9_23_22, AVB_LINK),
1333 PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SCIFB0_TXD_B, SEL_SCIFB_1), 1332 PINMUX_IPSR_MSEL(IP9_23_22, SCIFB0_TXD_B, SEL_SCIFB_1),
1334 PINMUX_IPSR_DATA(IP9_25_24, SD1_DAT2), 1333 PINMUX_IPSR_DATA(IP9_25_24, SD1_DAT2),
1335 PINMUX_IPSR_DATA(IP9_25_24, AVB_COL), 1334 PINMUX_IPSR_DATA(IP9_25_24, AVB_COL),
1336 PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SCIFB0_CTS_N_B, SEL_SCIFB_1), 1335 PINMUX_IPSR_MSEL(IP9_25_24, SCIFB0_CTS_N_B, SEL_SCIFB_1),
1337 PINMUX_IPSR_DATA(IP9_27_26, SD1_DAT3), 1336 PINMUX_IPSR_DATA(IP9_27_26, SD1_DAT3),
1338 PINMUX_IPSR_DATA(IP9_27_26, AVB_RXD0), 1337 PINMUX_IPSR_DATA(IP9_27_26, AVB_RXD0),
1339 PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SCIFB0_RTS_N_B, SEL_SCIFB_1), 1338 PINMUX_IPSR_MSEL(IP9_27_26, SCIFB0_RTS_N_B, SEL_SCIFB_1),
1340 PINMUX_IPSR_DATA(IP9_31_28, SD1_CD), 1339 PINMUX_IPSR_DATA(IP9_31_28, SD1_CD),
1341 PINMUX_IPSR_DATA(IP9_31_28, MMC1_D6), 1340 PINMUX_IPSR_DATA(IP9_31_28, MMC1_D6),
1342 PINMUX_IPSR_MODSEL_DATA(IP9_31_28, TS_SDEN1, SEL_TSIF1_0), 1341 PINMUX_IPSR_MSEL(IP9_31_28, TS_SDEN1, SEL_TSIF1_0),
1343 PINMUX_IPSR_DATA(IP9_31_28, USB1_EXTP), 1342 PINMUX_IPSR_DATA(IP9_31_28, USB1_EXTP),
1344 PINMUX_IPSR_MODSEL_DATA(IP9_31_28, GLO_SS, SEL_GPS_0), 1343 PINMUX_IPSR_MSEL(IP9_31_28, GLO_SS, SEL_GPS_0),
1345 PINMUX_IPSR_MODSEL_DATA(IP9_31_28, VI0_CLK_B, SEL_VI0_1), 1344 PINMUX_IPSR_MSEL(IP9_31_28, VI0_CLK_B, SEL_VI0_1),
1346 PINMUX_IPSR_MODSEL_DATA(IP9_31_28, IIC2_SCL_D, SEL_IIC2_3), 1345 PINMUX_IPSR_MSEL(IP9_31_28, IIC2_SCL_D, SEL_IIC2_3),
1347 PINMUX_IPSR_MODSEL_DATA(IP9_31_28, I2C2_SCL_D, SEL_I2C2_3), 1346 PINMUX_IPSR_MSEL(IP9_31_28, I2C2_SCL_D, SEL_I2C2_3),
1348 PINMUX_IPSR_MODSEL_DATA(IP9_31_28, SIM0_CLK_B, SEL_SIM_1), 1347 PINMUX_IPSR_MSEL(IP9_31_28, SIM0_CLK_B, SEL_SIM_1),
1349 PINMUX_IPSR_MODSEL_DATA(IP9_31_28, VI3_CLK_B, SEL_VI3_1), 1348 PINMUX_IPSR_MSEL(IP9_31_28, VI3_CLK_B, SEL_VI3_1),
1350 1349
1351 PINMUX_IPSR_DATA(IP10_3_0, SD1_WP), 1350 PINMUX_IPSR_DATA(IP10_3_0, SD1_WP),
1352 PINMUX_IPSR_DATA(IP10_3_0, MMC1_D7), 1351 PINMUX_IPSR_DATA(IP10_3_0, MMC1_D7),
1353 PINMUX_IPSR_MODSEL_DATA(IP10_3_0, TS_SPSYNC1, SEL_TSIF1_0), 1352 PINMUX_IPSR_MSEL(IP10_3_0, TS_SPSYNC1, SEL_TSIF1_0),
1354 PINMUX_IPSR_DATA(IP10_3_0, USB1_IDIN), 1353 PINMUX_IPSR_DATA(IP10_3_0, USB1_IDIN),
1355 PINMUX_IPSR_MODSEL_DATA(IP10_3_0, GLO_RFON, SEL_GPS_0), 1354 PINMUX_IPSR_MSEL(IP10_3_0, GLO_RFON, SEL_GPS_0),
1356 PINMUX_IPSR_MODSEL_DATA(IP10_3_0, VI1_CLK_B, SEL_VI1_1), 1355 PINMUX_IPSR_MSEL(IP10_3_0, VI1_CLK_B, SEL_VI1_1),
1357 PINMUX_IPSR_MODSEL_DATA(IP10_3_0, IIC2_SDA_D, SEL_IIC2_3), 1356 PINMUX_IPSR_MSEL(IP10_3_0, IIC2_SDA_D, SEL_IIC2_3),
1358 PINMUX_IPSR_MODSEL_DATA(IP10_3_0, I2C2_SDA_D, SEL_I2C2_3), 1357 PINMUX_IPSR_MSEL(IP10_3_0, I2C2_SDA_D, SEL_I2C2_3),
1359 PINMUX_IPSR_MODSEL_DATA(IP10_3_0, SIM0_D_B, SEL_SIM_1), 1358 PINMUX_IPSR_MSEL(IP10_3_0, SIM0_D_B, SEL_SIM_1),
1360 PINMUX_IPSR_DATA(IP10_6_4, SD2_CLK), 1359 PINMUX_IPSR_DATA(IP10_6_4, SD2_CLK),
1361 PINMUX_IPSR_DATA(IP10_6_4, MMC0_CLK), 1360 PINMUX_IPSR_DATA(IP10_6_4, MMC0_CLK),
1362 PINMUX_IPSR_MODSEL_DATA(IP10_6_4, SIM0_CLK, SEL_SIM_0), 1361 PINMUX_IPSR_MSEL(IP10_6_4, SIM0_CLK, SEL_SIM_0),
1363 PINMUX_IPSR_MODSEL_DATA(IP10_6_4, VI0_DATA0_VI0_B0_B, SEL_VI0_1), 1362 PINMUX_IPSR_MSEL(IP10_6_4, VI0_DATA0_VI0_B0_B, SEL_VI0_1),
1364 PINMUX_IPSR_MODSEL_DATA(IP10_6_4, TS_SDEN0_C, SEL_TSIF0_2), 1363 PINMUX_IPSR_MSEL(IP10_6_4, TS_SDEN0_C, SEL_TSIF0_2),
1365 PINMUX_IPSR_MODSEL_DATA(IP10_6_4, GLO_SCLK_B, SEL_GPS_1), 1364 PINMUX_IPSR_MSEL(IP10_6_4, GLO_SCLK_B, SEL_GPS_1),
1366 PINMUX_IPSR_MODSEL_DATA(IP10_6_4, VI3_DATA0_B, SEL_VI3_1), 1365 PINMUX_IPSR_MSEL(IP10_6_4, VI3_DATA0_B, SEL_VI3_1),
1367 PINMUX_IPSR_DATA(IP10_10_7, SD2_CMD), 1366 PINMUX_IPSR_DATA(IP10_10_7, SD2_CMD),
1368 PINMUX_IPSR_DATA(IP10_10_7, MMC0_CMD), 1367 PINMUX_IPSR_DATA(IP10_10_7, MMC0_CMD),
1369 PINMUX_IPSR_MODSEL_DATA(IP10_10_7, SIM0_D, SEL_SIM_0), 1368 PINMUX_IPSR_MSEL(IP10_10_7, SIM0_D, SEL_SIM_0),
1370 PINMUX_IPSR_MODSEL_DATA(IP10_10_7, VI0_DATA1_VI0_B1_B, SEL_VI0_1), 1369 PINMUX_IPSR_MSEL(IP10_10_7, VI0_DATA1_VI0_B1_B, SEL_VI0_1),
1371 PINMUX_IPSR_MODSEL_DATA(IP10_10_7, SCIFB1_SCK_E, SEL_SCIFB1_4), 1370 PINMUX_IPSR_MSEL(IP10_10_7, SCIFB1_SCK_E, SEL_SCIFB1_4),
1372 PINMUX_IPSR_MODSEL_DATA(IP10_10_7, SCK1_D, SEL_SCIF1_3), 1371 PINMUX_IPSR_MSEL(IP10_10_7, SCK1_D, SEL_SCIF1_3),
1373 PINMUX_IPSR_MODSEL_DATA(IP10_10_7, TS_SPSYNC0_C, SEL_TSIF0_2), 1372 PINMUX_IPSR_MSEL(IP10_10_7, TS_SPSYNC0_C, SEL_TSIF0_2),
1374 PINMUX_IPSR_MODSEL_DATA(IP10_10_7, GLO_SDATA_B, SEL_GPS_1), 1373 PINMUX_IPSR_MSEL(IP10_10_7, GLO_SDATA_B, SEL_GPS_1),
1375 PINMUX_IPSR_MODSEL_DATA(IP10_10_7, VI3_DATA1_B, SEL_VI3_1), 1374 PINMUX_IPSR_MSEL(IP10_10_7, VI3_DATA1_B, SEL_VI3_1),
1376 PINMUX_IPSR_DATA(IP10_14_11, SD2_DAT0), 1375 PINMUX_IPSR_DATA(IP10_14_11, SD2_DAT0),
1377 PINMUX_IPSR_DATA(IP10_14_11, MMC0_D0), 1376 PINMUX_IPSR_DATA(IP10_14_11, MMC0_D0),
1378 PINMUX_IPSR_MODSEL_DATA(IP10_14_11, FMCLK_B, SEL_FM_1), 1377 PINMUX_IPSR_MSEL(IP10_14_11, FMCLK_B, SEL_FM_1),
1379 PINMUX_IPSR_MODSEL_DATA(IP10_14_11, VI0_DATA2_VI0_B2_B, SEL_VI0_1), 1378 PINMUX_IPSR_MSEL(IP10_14_11, VI0_DATA2_VI0_B2_B, SEL_VI0_1),
1380 PINMUX_IPSR_MODSEL_DATA(IP10_14_11, SCIFB1_RXD_E, SEL_SCIFB1_4), 1379 PINMUX_IPSR_MSEL(IP10_14_11, SCIFB1_RXD_E, SEL_SCIFB1_4),
1381 PINMUX_IPSR_MODSEL_DATA(IP10_14_11, RX1_D, SEL_SCIF1_3), 1380 PINMUX_IPSR_MSEL(IP10_14_11, RX1_D, SEL_SCIF1_3),
1382 PINMUX_IPSR_MODSEL_DATA(IP10_14_11, TS_SDAT0_C, SEL_TSIF0_2), 1381 PINMUX_IPSR_MSEL(IP10_14_11, TS_SDAT0_C, SEL_TSIF0_2),
1383 PINMUX_IPSR_MODSEL_DATA(IP10_14_11, GLO_SS_B, SEL_GPS_1), 1382 PINMUX_IPSR_MSEL(IP10_14_11, GLO_SS_B, SEL_GPS_1),
1384 PINMUX_IPSR_MODSEL_DATA(IP10_14_11, VI3_DATA2_B, SEL_VI3_1), 1383 PINMUX_IPSR_MSEL(IP10_14_11, VI3_DATA2_B, SEL_VI3_1),
1385 PINMUX_IPSR_DATA(IP10_18_15, SD2_DAT1), 1384 PINMUX_IPSR_DATA(IP10_18_15, SD2_DAT1),
1386 PINMUX_IPSR_DATA(IP10_18_15, MMC0_D1), 1385 PINMUX_IPSR_DATA(IP10_18_15, MMC0_D1),
1387 PINMUX_IPSR_MODSEL_DATA(IP10_18_15, FMIN_B, SEL_FM_1), 1386 PINMUX_IPSR_MSEL(IP10_18_15, FMIN_B, SEL_FM_1),
1388 PINMUX_IPSR_MODSEL_DATA(IP10_18_15, VI0_DATA3_VI0_B3_B, SEL_VI0_1), 1387 PINMUX_IPSR_MSEL(IP10_18_15, VI0_DATA3_VI0_B3_B, SEL_VI0_1),
1389 PINMUX_IPSR_MODSEL_DATA(IP10_18_15, SCIFB1_TXD_E, SEL_SCIFB1_4), 1388 PINMUX_IPSR_MSEL(IP10_18_15, SCIFB1_TXD_E, SEL_SCIFB1_4),
1390 PINMUX_IPSR_MODSEL_DATA(IP10_18_15, TX1_D, SEL_SCIF1_3), 1389 PINMUX_IPSR_MSEL(IP10_18_15, TX1_D, SEL_SCIF1_3),
1391 PINMUX_IPSR_MODSEL_DATA(IP10_18_15, TS_SCK0_C, SEL_TSIF0_2), 1390 PINMUX_IPSR_MSEL(IP10_18_15, TS_SCK0_C, SEL_TSIF0_2),
1392 PINMUX_IPSR_MODSEL_DATA(IP10_18_15, GLO_RFON_B, SEL_GPS_1), 1391 PINMUX_IPSR_MSEL(IP10_18_15, GLO_RFON_B, SEL_GPS_1),
1393 PINMUX_IPSR_MODSEL_DATA(IP10_18_15, VI3_DATA3_B, SEL_VI3_1), 1392 PINMUX_IPSR_MSEL(IP10_18_15, VI3_DATA3_B, SEL_VI3_1),
1394 PINMUX_IPSR_DATA(IP10_22_19, SD2_DAT2), 1393 PINMUX_IPSR_DATA(IP10_22_19, SD2_DAT2),
1395 PINMUX_IPSR_DATA(IP10_22_19, MMC0_D2), 1394 PINMUX_IPSR_DATA(IP10_22_19, MMC0_D2),
1396 PINMUX_IPSR_MODSEL_DATA(IP10_22_19, BPFCLK_B, SEL_FM_1), 1395 PINMUX_IPSR_MSEL(IP10_22_19, BPFCLK_B, SEL_FM_1),
1397 PINMUX_IPSR_MODSEL_DATA(IP10_22_19, VI0_DATA4_VI0_B4_B, SEL_VI0_1), 1396 PINMUX_IPSR_MSEL(IP10_22_19, VI0_DATA4_VI0_B4_B, SEL_VI0_1),
1398 PINMUX_IPSR_MODSEL_DATA(IP10_22_19, HRX0_D, SEL_HSCIF0_3), 1397 PINMUX_IPSR_MSEL(IP10_22_19, HRX0_D, SEL_HSCIF0_3),
1399 PINMUX_IPSR_MODSEL_DATA(IP10_22_19, TS_SDEN1_B, SEL_TSIF1_1), 1398 PINMUX_IPSR_MSEL(IP10_22_19, TS_SDEN1_B, SEL_TSIF1_1),
1400 PINMUX_IPSR_MODSEL_DATA(IP10_22_19, GLO_Q0_B, SEL_GPS_1), 1399 PINMUX_IPSR_MSEL(IP10_22_19, GLO_Q0_B, SEL_GPS_1),
1401 PINMUX_IPSR_MODSEL_DATA(IP10_22_19, VI3_DATA4_B, SEL_VI3_1), 1400 PINMUX_IPSR_MSEL(IP10_22_19, VI3_DATA4_B, SEL_VI3_1),
1402 PINMUX_IPSR_DATA(IP10_25_23, SD2_DAT3), 1401 PINMUX_IPSR_DATA(IP10_25_23, SD2_DAT3),
1403 PINMUX_IPSR_DATA(IP10_25_23, MMC0_D3), 1402 PINMUX_IPSR_DATA(IP10_25_23, MMC0_D3),
1404 PINMUX_IPSR_MODSEL_DATA(IP10_25_23, SIM0_RST, SEL_SIM_0), 1403 PINMUX_IPSR_MSEL(IP10_25_23, SIM0_RST, SEL_SIM_0),
1405 PINMUX_IPSR_MODSEL_DATA(IP10_25_23, VI0_DATA5_VI0_B5_B, SEL_VI0_1), 1404 PINMUX_IPSR_MSEL(IP10_25_23, VI0_DATA5_VI0_B5_B, SEL_VI0_1),
1406 PINMUX_IPSR_MODSEL_DATA(IP10_25_23, HTX0_D, SEL_HSCIF0_3), 1405 PINMUX_IPSR_MSEL(IP10_25_23, HTX0_D, SEL_HSCIF0_3),
1407 PINMUX_IPSR_MODSEL_DATA(IP10_25_23, TS_SPSYNC1_B, SEL_TSIF1_1), 1406 PINMUX_IPSR_MSEL(IP10_25_23, TS_SPSYNC1_B, SEL_TSIF1_1),
1408 PINMUX_IPSR_MODSEL_DATA(IP10_25_23, GLO_Q1_B, SEL_GPS_1), 1407 PINMUX_IPSR_MSEL(IP10_25_23, GLO_Q1_B, SEL_GPS_1),
1409 PINMUX_IPSR_MODSEL_DATA(IP10_25_23, VI3_DATA5_B, SEL_VI3_1), 1408 PINMUX_IPSR_MSEL(IP10_25_23, VI3_DATA5_B, SEL_VI3_1),
1410 PINMUX_IPSR_DATA(IP10_29_26, SD2_CD), 1409 PINMUX_IPSR_DATA(IP10_29_26, SD2_CD),
1411 PINMUX_IPSR_DATA(IP10_29_26, MMC0_D4), 1410 PINMUX_IPSR_DATA(IP10_29_26, MMC0_D4),
1412 PINMUX_IPSR_MODSEL_DATA(IP10_29_26, TS_SDAT0_B, SEL_TSIF0_1), 1411 PINMUX_IPSR_MSEL(IP10_29_26, TS_SDAT0_B, SEL_TSIF0_1),
1413 PINMUX_IPSR_DATA(IP10_29_26, USB2_EXTP), 1412 PINMUX_IPSR_DATA(IP10_29_26, USB2_EXTP),
1414 PINMUX_IPSR_MODSEL_DATA(IP10_29_26, GLO_I0, SEL_GPS_0), 1413 PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0, SEL_GPS_0),
1415 PINMUX_IPSR_MODSEL_DATA(IP10_29_26, VI0_DATA6_VI0_B6_B, SEL_VI0_1), 1414 PINMUX_IPSR_MSEL(IP10_29_26, VI0_DATA6_VI0_B6_B, SEL_VI0_1),
1416 PINMUX_IPSR_MODSEL_DATA(IP10_29_26, HCTS0_N_D, SEL_HSCIF0_3), 1415 PINMUX_IPSR_MSEL(IP10_29_26, HCTS0_N_D, SEL_HSCIF0_3),
1417 PINMUX_IPSR_MODSEL_DATA(IP10_29_26, TS_SDAT1_B, SEL_TSIF1_1), 1416 PINMUX_IPSR_MSEL(IP10_29_26, TS_SDAT1_B, SEL_TSIF1_1),
1418 PINMUX_IPSR_MODSEL_DATA(IP10_29_26, GLO_I0_B, SEL_GPS_1), 1417 PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0_B, SEL_GPS_1),
1419 PINMUX_IPSR_MODSEL_DATA(IP10_29_26, VI3_DATA6_B, SEL_VI3_1), 1418 PINMUX_IPSR_MSEL(IP10_29_26, VI3_DATA6_B, SEL_VI3_1),
1420 1419
1421 PINMUX_IPSR_DATA(IP11_3_0, SD2_WP), 1420 PINMUX_IPSR_DATA(IP11_3_0, SD2_WP),
1422 PINMUX_IPSR_DATA(IP11_3_0, MMC0_D5), 1421 PINMUX_IPSR_DATA(IP11_3_0, MMC0_D5),
1423 PINMUX_IPSR_MODSEL_DATA(IP11_3_0, TS_SCK0_B, SEL_TSIF0_1), 1422 PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK0_B, SEL_TSIF0_1),
1424 PINMUX_IPSR_DATA(IP11_3_0, USB2_IDIN), 1423 PINMUX_IPSR_DATA(IP11_3_0, USB2_IDIN),
1425 PINMUX_IPSR_MODSEL_DATA(IP11_3_0, GLO_I1, SEL_GPS_0), 1424 PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1, SEL_GPS_0),
1426 PINMUX_IPSR_MODSEL_DATA(IP11_3_0, VI0_DATA7_VI0_B7_B, SEL_VI0_1), 1425 PINMUX_IPSR_MSEL(IP11_3_0, VI0_DATA7_VI0_B7_B, SEL_VI0_1),
1427 PINMUX_IPSR_MODSEL_DATA(IP11_3_0, HRTS0_N_D, SEL_HSCIF0_3), 1426 PINMUX_IPSR_MSEL(IP11_3_0, HRTS0_N_D, SEL_HSCIF0_3),
1428 PINMUX_IPSR_MODSEL_DATA(IP11_3_0, TS_SCK1_B, SEL_TSIF1_1), 1427 PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK1_B, SEL_TSIF1_1),
1429 PINMUX_IPSR_MODSEL_DATA(IP11_3_0, GLO_I1_B, SEL_GPS_1), 1428 PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1_B, SEL_GPS_1),
1430 PINMUX_IPSR_MODSEL_DATA(IP11_3_0, VI3_DATA7_B, SEL_VI3_1), 1429 PINMUX_IPSR_MSEL(IP11_3_0, VI3_DATA7_B, SEL_VI3_1),
1431 PINMUX_IPSR_DATA(IP11_4, SD3_CLK), 1430 PINMUX_IPSR_DATA(IP11_4, SD3_CLK),
1432 PINMUX_IPSR_DATA(IP11_4, MMC1_CLK), 1431 PINMUX_IPSR_DATA(IP11_4, MMC1_CLK),
1433 PINMUX_IPSR_DATA(IP11_6_5, SD3_CMD), 1432 PINMUX_IPSR_DATA(IP11_6_5, SD3_CMD),
@@ -1447,298 +1446,298 @@ static const u16 pinmux_data[] = {
1447 PINMUX_IPSR_DATA(IP11_14_13, SCKZ), 1446 PINMUX_IPSR_DATA(IP11_14_13, SCKZ),
1448 PINMUX_IPSR_DATA(IP11_17_15, SD3_CD), 1447 PINMUX_IPSR_DATA(IP11_17_15, SD3_CD),
1449 PINMUX_IPSR_DATA(IP11_17_15, MMC1_D4), 1448 PINMUX_IPSR_DATA(IP11_17_15, MMC1_D4),
1450 PINMUX_IPSR_MODSEL_DATA(IP11_17_15, TS_SDAT1, SEL_TSIF1_0), 1449 PINMUX_IPSR_MSEL(IP11_17_15, TS_SDAT1, SEL_TSIF1_0),
1451 PINMUX_IPSR_DATA(IP11_17_15, VSP), 1450 PINMUX_IPSR_DATA(IP11_17_15, VSP),
1452 PINMUX_IPSR_MODSEL_DATA(IP11_17_15, GLO_Q0, SEL_GPS_0), 1451 PINMUX_IPSR_MSEL(IP11_17_15, GLO_Q0, SEL_GPS_0),
1453 PINMUX_IPSR_MODSEL_DATA(IP11_17_15, SIM0_RST_B, SEL_SIM_1), 1452 PINMUX_IPSR_MSEL(IP11_17_15, SIM0_RST_B, SEL_SIM_1),
1454 PINMUX_IPSR_DATA(IP11_21_18, SD3_WP), 1453 PINMUX_IPSR_DATA(IP11_21_18, SD3_WP),
1455 PINMUX_IPSR_DATA(IP11_21_18, MMC1_D5), 1454 PINMUX_IPSR_DATA(IP11_21_18, MMC1_D5),
1456 PINMUX_IPSR_MODSEL_DATA(IP11_21_18, TS_SCK1, SEL_TSIF1_0), 1455 PINMUX_IPSR_MSEL(IP11_21_18, TS_SCK1, SEL_TSIF1_0),
1457 PINMUX_IPSR_MODSEL_DATA(IP11_21_18, GLO_Q1, SEL_GPS_0), 1456 PINMUX_IPSR_MSEL(IP11_21_18, GLO_Q1, SEL_GPS_0),
1458 PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_C, SEL_FM_2), 1457 PINMUX_IPSR_MSEL(IP11_21_18, FMIN_C, SEL_FM_2),
1459 PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_E, SEL_FM_4), 1458 PINMUX_IPSR_MSEL(IP11_21_18, FMIN_E, SEL_FM_4),
1460 PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_F, SEL_FM_5), 1459 PINMUX_IPSR_MSEL(IP11_21_18, FMIN_F, SEL_FM_5),
1461 PINMUX_IPSR_DATA(IP11_23_22, MLB_CLK), 1460 PINMUX_IPSR_DATA(IP11_23_22, MLB_CLK),
1462 PINMUX_IPSR_MODSEL_DATA(IP11_23_22, IIC2_SCL_B, SEL_IIC2_1), 1461 PINMUX_IPSR_MSEL(IP11_23_22, IIC2_SCL_B, SEL_IIC2_1),
1463 PINMUX_IPSR_MODSEL_DATA(IP11_23_22, I2C2_SCL_B, SEL_I2C2_1), 1462 PINMUX_IPSR_MSEL(IP11_23_22, I2C2_SCL_B, SEL_I2C2_1),
1464 PINMUX_IPSR_DATA(IP11_26_24, MLB_SIG), 1463 PINMUX_IPSR_DATA(IP11_26_24, MLB_SIG),
1465 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, SCIFB1_RXD_D, SEL_SCIFB1_3), 1464 PINMUX_IPSR_MSEL(IP11_26_24, SCIFB1_RXD_D, SEL_SCIFB1_3),
1466 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, RX1_C, SEL_SCIF1_2), 1465 PINMUX_IPSR_MSEL(IP11_26_24, RX1_C, SEL_SCIF1_2),
1467 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, IIC2_SDA_B, SEL_IIC2_1), 1466 PINMUX_IPSR_MSEL(IP11_26_24, IIC2_SDA_B, SEL_IIC2_1),
1468 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, I2C2_SDA_B, SEL_I2C2_1), 1467 PINMUX_IPSR_MSEL(IP11_26_24, I2C2_SDA_B, SEL_I2C2_1),
1469 PINMUX_IPSR_DATA(IP11_29_27, MLB_DAT), 1468 PINMUX_IPSR_DATA(IP11_29_27, MLB_DAT),
1470 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, SCIFB1_TXD_D, SEL_SCIFB1_3), 1469 PINMUX_IPSR_MSEL(IP11_29_27, SCIFB1_TXD_D, SEL_SCIFB1_3),
1471 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, TX1_C, SEL_SCIF1_2), 1470 PINMUX_IPSR_MSEL(IP11_29_27, TX1_C, SEL_SCIF1_2),
1472 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, BPFCLK_C, SEL_FM_2), 1471 PINMUX_IPSR_MSEL(IP11_29_27, BPFCLK_C, SEL_FM_2),
1473 PINMUX_IPSR_DATA(IP11_31_30, SSI_SCK0129), 1472 PINMUX_IPSR_DATA(IP11_31_30, SSI_SCK0129),
1474 PINMUX_IPSR_MODSEL_DATA(IP11_31_30, CAN_CLK_B, SEL_CANCLK_1), 1473 PINMUX_IPSR_MSEL(IP11_31_30, CAN_CLK_B, SEL_CANCLK_1),
1475 PINMUX_IPSR_DATA(IP11_31_30, MOUT0), 1474 PINMUX_IPSR_DATA(IP11_31_30, MOUT0),
1476 1475
1477 PINMUX_IPSR_DATA(IP12_1_0, SSI_WS0129), 1476 PINMUX_IPSR_DATA(IP12_1_0, SSI_WS0129),
1478 PINMUX_IPSR_MODSEL_DATA(IP12_1_0, CAN0_TX_B, SEL_CAN0_1), 1477 PINMUX_IPSR_MSEL(IP12_1_0, CAN0_TX_B, SEL_CAN0_1),
1479 PINMUX_IPSR_DATA(IP12_1_0, MOUT1), 1478 PINMUX_IPSR_DATA(IP12_1_0, MOUT1),
1480 PINMUX_IPSR_DATA(IP12_3_2, SSI_SDATA0), 1479 PINMUX_IPSR_DATA(IP12_3_2, SSI_SDATA0),
1481 PINMUX_IPSR_MODSEL_DATA(IP12_3_2, CAN0_RX_B, SEL_CAN0_1), 1480 PINMUX_IPSR_MSEL(IP12_3_2, CAN0_RX_B, SEL_CAN0_1),
1482 PINMUX_IPSR_DATA(IP12_3_2, MOUT2), 1481 PINMUX_IPSR_DATA(IP12_3_2, MOUT2),
1483 PINMUX_IPSR_DATA(IP12_5_4, SSI_SDATA1), 1482 PINMUX_IPSR_DATA(IP12_5_4, SSI_SDATA1),
1484 PINMUX_IPSR_MODSEL_DATA(IP12_5_4, CAN1_TX_B, SEL_CAN1_1), 1483 PINMUX_IPSR_MSEL(IP12_5_4, CAN1_TX_B, SEL_CAN1_1),
1485 PINMUX_IPSR_DATA(IP12_5_4, MOUT5), 1484 PINMUX_IPSR_DATA(IP12_5_4, MOUT5),
1486 PINMUX_IPSR_DATA(IP12_7_6, SSI_SDATA2), 1485 PINMUX_IPSR_DATA(IP12_7_6, SSI_SDATA2),
1487 PINMUX_IPSR_MODSEL_DATA(IP12_7_6, CAN1_RX_B, SEL_CAN1_1), 1486 PINMUX_IPSR_MSEL(IP12_7_6, CAN1_RX_B, SEL_CAN1_1),
1488 PINMUX_IPSR_DATA(IP12_7_6, SSI_SCK1), 1487 PINMUX_IPSR_DATA(IP12_7_6, SSI_SCK1),
1489 PINMUX_IPSR_DATA(IP12_7_6, MOUT6), 1488 PINMUX_IPSR_DATA(IP12_7_6, MOUT6),
1490 PINMUX_IPSR_DATA(IP12_10_8, SSI_SCK34), 1489 PINMUX_IPSR_DATA(IP12_10_8, SSI_SCK34),
1491 PINMUX_IPSR_DATA(IP12_10_8, STP_OPWM_0), 1490 PINMUX_IPSR_DATA(IP12_10_8, STP_OPWM_0),
1492 PINMUX_IPSR_MODSEL_DATA(IP12_10_8, SCIFB0_SCK, SEL_SCIFB_0), 1491 PINMUX_IPSR_MSEL(IP12_10_8, SCIFB0_SCK, SEL_SCIFB_0),
1493 PINMUX_IPSR_MODSEL_DATA(IP12_10_8, MSIOF1_SCK, SEL_SOF1_0), 1492 PINMUX_IPSR_MSEL(IP12_10_8, MSIOF1_SCK, SEL_SOF1_0),
1494 PINMUX_IPSR_DATA(IP12_10_8, CAN_DEBUG_HW_TRIGGER), 1493 PINMUX_IPSR_DATA(IP12_10_8, CAN_DEBUG_HW_TRIGGER),
1495 PINMUX_IPSR_DATA(IP12_13_11, SSI_WS34), 1494 PINMUX_IPSR_DATA(IP12_13_11, SSI_WS34),
1496 PINMUX_IPSR_MODSEL_DATA(IP12_13_11, STP_IVCXO27_0, SEL_SSP_0), 1495 PINMUX_IPSR_MSEL(IP12_13_11, STP_IVCXO27_0, SEL_SSP_0),
1497 PINMUX_IPSR_MODSEL_DATA(IP12_13_11, SCIFB0_RXD, SEL_SCIFB_0), 1496 PINMUX_IPSR_MSEL(IP12_13_11, SCIFB0_RXD, SEL_SCIFB_0),
1498 PINMUX_IPSR_DATA(IP12_13_11, MSIOF1_SYNC), 1497 PINMUX_IPSR_DATA(IP12_13_11, MSIOF1_SYNC),
1499 PINMUX_IPSR_DATA(IP12_13_11, CAN_STEP0), 1498 PINMUX_IPSR_DATA(IP12_13_11, CAN_STEP0),
1500 PINMUX_IPSR_DATA(IP12_16_14, SSI_SDATA3), 1499 PINMUX_IPSR_DATA(IP12_16_14, SSI_SDATA3),
1501 PINMUX_IPSR_MODSEL_DATA(IP12_16_14, STP_ISCLK_0, SEL_SSP_0), 1500 PINMUX_IPSR_MSEL(IP12_16_14, STP_ISCLK_0, SEL_SSP_0),
1502 PINMUX_IPSR_MODSEL_DATA(IP12_16_14, SCIFB0_TXD, SEL_SCIFB_0), 1501 PINMUX_IPSR_MSEL(IP12_16_14, SCIFB0_TXD, SEL_SCIFB_0),
1503 PINMUX_IPSR_MODSEL_DATA(IP12_16_14, MSIOF1_SS1, SEL_SOF1_0), 1502 PINMUX_IPSR_MSEL(IP12_16_14, MSIOF1_SS1, SEL_SOF1_0),
1504 PINMUX_IPSR_DATA(IP12_16_14, CAN_TXCLK), 1503 PINMUX_IPSR_DATA(IP12_16_14, CAN_TXCLK),
1505 PINMUX_IPSR_DATA(IP12_19_17, SSI_SCK4), 1504 PINMUX_IPSR_DATA(IP12_19_17, SSI_SCK4),
1506 PINMUX_IPSR_MODSEL_DATA(IP12_19_17, STP_ISD_0, SEL_SSP_0), 1505 PINMUX_IPSR_MSEL(IP12_19_17, STP_ISD_0, SEL_SSP_0),
1507 PINMUX_IPSR_MODSEL_DATA(IP12_19_17, SCIFB0_CTS_N, SEL_SCIFB_0), 1506 PINMUX_IPSR_MSEL(IP12_19_17, SCIFB0_CTS_N, SEL_SCIFB_0),
1508 PINMUX_IPSR_MODSEL_DATA(IP12_19_17, MSIOF1_SS2, SEL_SOF1_0), 1507 PINMUX_IPSR_MSEL(IP12_19_17, MSIOF1_SS2, SEL_SOF1_0),
1509 PINMUX_IPSR_MODSEL_DATA(IP12_19_17, SSI_SCK5_C, SEL_SSI5_2), 1508 PINMUX_IPSR_MSEL(IP12_19_17, SSI_SCK5_C, SEL_SSI5_2),
1510 PINMUX_IPSR_DATA(IP12_19_17, CAN_DEBUGOUT0), 1509 PINMUX_IPSR_DATA(IP12_19_17, CAN_DEBUGOUT0),
1511 PINMUX_IPSR_DATA(IP12_22_20, SSI_WS4), 1510 PINMUX_IPSR_DATA(IP12_22_20, SSI_WS4),
1512 PINMUX_IPSR_MODSEL_DATA(IP12_22_20, STP_ISEN_0, SEL_SSP_0), 1511 PINMUX_IPSR_MSEL(IP12_22_20, STP_ISEN_0, SEL_SSP_0),
1513 PINMUX_IPSR_MODSEL_DATA(IP12_22_20, SCIFB0_RTS_N, SEL_SCIFB_0), 1512 PINMUX_IPSR_MSEL(IP12_22_20, SCIFB0_RTS_N, SEL_SCIFB_0),
1514 PINMUX_IPSR_MODSEL_DATA(IP12_22_20, MSIOF1_TXD, SEL_SOF1_0), 1513 PINMUX_IPSR_MSEL(IP12_22_20, MSIOF1_TXD, SEL_SOF1_0),
1515 PINMUX_IPSR_MODSEL_DATA(IP12_22_20, SSI_WS5_C, SEL_SSI5_2), 1514 PINMUX_IPSR_MSEL(IP12_22_20, SSI_WS5_C, SEL_SSI5_2),
1516 PINMUX_IPSR_DATA(IP12_22_20, CAN_DEBUGOUT1), 1515 PINMUX_IPSR_DATA(IP12_22_20, CAN_DEBUGOUT1),
1517 PINMUX_IPSR_DATA(IP12_24_23, SSI_SDATA4), 1516 PINMUX_IPSR_DATA(IP12_24_23, SSI_SDATA4),
1518 PINMUX_IPSR_MODSEL_DATA(IP12_24_23, STP_ISSYNC_0, SEL_SSP_0), 1517 PINMUX_IPSR_MSEL(IP12_24_23, STP_ISSYNC_0, SEL_SSP_0),
1519 PINMUX_IPSR_MODSEL_DATA(IP12_24_23, MSIOF1_RXD, SEL_SOF1_0), 1518 PINMUX_IPSR_MSEL(IP12_24_23, MSIOF1_RXD, SEL_SOF1_0),
1520 PINMUX_IPSR_DATA(IP12_24_23, CAN_DEBUGOUT2), 1519 PINMUX_IPSR_DATA(IP12_24_23, CAN_DEBUGOUT2),
1521 PINMUX_IPSR_MODSEL_DATA(IP12_27_25, SSI_SCK5, SEL_SSI5_0), 1520 PINMUX_IPSR_MSEL(IP12_27_25, SSI_SCK5, SEL_SSI5_0),
1522 PINMUX_IPSR_MODSEL_DATA(IP12_27_25, SCIFB1_SCK, SEL_SCIFB1_0), 1521 PINMUX_IPSR_MSEL(IP12_27_25, SCIFB1_SCK, SEL_SCIFB1_0),
1523 PINMUX_IPSR_MODSEL_DATA(IP12_27_25, IERX_B, SEL_IEB_1), 1522 PINMUX_IPSR_MSEL(IP12_27_25, IERX_B, SEL_IEB_1),
1524 PINMUX_IPSR_DATA(IP12_27_25, DU2_EXHSYNC_DU2_HSYNC), 1523 PINMUX_IPSR_DATA(IP12_27_25, DU2_EXHSYNC_DU2_HSYNC),
1525 PINMUX_IPSR_DATA(IP12_27_25, QSTH_QHS), 1524 PINMUX_IPSR_DATA(IP12_27_25, QSTH_QHS),
1526 PINMUX_IPSR_DATA(IP12_27_25, CAN_DEBUGOUT3), 1525 PINMUX_IPSR_DATA(IP12_27_25, CAN_DEBUGOUT3),
1527 PINMUX_IPSR_MODSEL_DATA(IP12_30_28, SSI_WS5, SEL_SSI5_0), 1526 PINMUX_IPSR_MSEL(IP12_30_28, SSI_WS5, SEL_SSI5_0),
1528 PINMUX_IPSR_MODSEL_DATA(IP12_30_28, SCIFB1_RXD, SEL_SCIFB1_0), 1527 PINMUX_IPSR_MSEL(IP12_30_28, SCIFB1_RXD, SEL_SCIFB1_0),
1529 PINMUX_IPSR_MODSEL_DATA(IP12_30_28, IECLK_B, SEL_IEB_1), 1528 PINMUX_IPSR_MSEL(IP12_30_28, IECLK_B, SEL_IEB_1),
1530 PINMUX_IPSR_DATA(IP12_30_28, DU2_EXVSYNC_DU2_VSYNC), 1529 PINMUX_IPSR_DATA(IP12_30_28, DU2_EXVSYNC_DU2_VSYNC),
1531 PINMUX_IPSR_DATA(IP12_30_28, QSTB_QHE), 1530 PINMUX_IPSR_DATA(IP12_30_28, QSTB_QHE),
1532 PINMUX_IPSR_DATA(IP12_30_28, CAN_DEBUGOUT4), 1531 PINMUX_IPSR_DATA(IP12_30_28, CAN_DEBUGOUT4),
1533 1532
1534 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SSI_SDATA5, SEL_SSI5_0), 1533 PINMUX_IPSR_MSEL(IP13_2_0, SSI_SDATA5, SEL_SSI5_0),
1535 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCIFB1_TXD, SEL_SCIFB1_0), 1534 PINMUX_IPSR_MSEL(IP13_2_0, SCIFB1_TXD, SEL_SCIFB1_0),
1536 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, IETX_B, SEL_IEB_1), 1535 PINMUX_IPSR_MSEL(IP13_2_0, IETX_B, SEL_IEB_1),
1537 PINMUX_IPSR_DATA(IP13_2_0, DU2_DR2), 1536 PINMUX_IPSR_DATA(IP13_2_0, DU2_DR2),
1538 PINMUX_IPSR_DATA(IP13_2_0, LCDOUT2), 1537 PINMUX_IPSR_DATA(IP13_2_0, LCDOUT2),
1539 PINMUX_IPSR_DATA(IP13_2_0, CAN_DEBUGOUT5), 1538 PINMUX_IPSR_DATA(IP13_2_0, CAN_DEBUGOUT5),
1540 PINMUX_IPSR_MODSEL_DATA(IP13_6_3, SSI_SCK6, SEL_SSI6_0), 1539 PINMUX_IPSR_MSEL(IP13_6_3, SSI_SCK6, SEL_SSI6_0),
1541 PINMUX_IPSR_MODSEL_DATA(IP13_6_3, SCIFB1_CTS_N, SEL_SCIFB1_0), 1540 PINMUX_IPSR_MSEL(IP13_6_3, SCIFB1_CTS_N, SEL_SCIFB1_0),
1542 PINMUX_IPSR_MODSEL_DATA(IP13_6_3, BPFCLK_D, SEL_FM_3), 1541 PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_D, SEL_FM_3),
1543 PINMUX_IPSR_DATA(IP13_6_3, DU2_DR3), 1542 PINMUX_IPSR_DATA(IP13_6_3, DU2_DR3),
1544 PINMUX_IPSR_DATA(IP13_6_3, LCDOUT3), 1543 PINMUX_IPSR_DATA(IP13_6_3, LCDOUT3),
1545 PINMUX_IPSR_DATA(IP13_6_3, CAN_DEBUGOUT6), 1544 PINMUX_IPSR_DATA(IP13_6_3, CAN_DEBUGOUT6),
1546 PINMUX_IPSR_MODSEL_DATA(IP13_6_3, BPFCLK_F, SEL_FM_5), 1545 PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_F, SEL_FM_5),
1547 PINMUX_IPSR_MODSEL_DATA(IP13_9_7, SSI_WS6, SEL_SSI6_0), 1546 PINMUX_IPSR_MSEL(IP13_9_7, SSI_WS6, SEL_SSI6_0),
1548 PINMUX_IPSR_MODSEL_DATA(IP13_9_7, SCIFB1_RTS_N, SEL_SCIFB1_0), 1547 PINMUX_IPSR_MSEL(IP13_9_7, SCIFB1_RTS_N, SEL_SCIFB1_0),
1549 PINMUX_IPSR_MODSEL_DATA(IP13_9_7, CAN0_TX_D, SEL_CAN0_3), 1548 PINMUX_IPSR_MSEL(IP13_9_7, CAN0_TX_D, SEL_CAN0_3),
1550 PINMUX_IPSR_DATA(IP13_9_7, DU2_DR4), 1549 PINMUX_IPSR_DATA(IP13_9_7, DU2_DR4),
1551 PINMUX_IPSR_DATA(IP13_9_7, LCDOUT4), 1550 PINMUX_IPSR_DATA(IP13_9_7, LCDOUT4),
1552 PINMUX_IPSR_DATA(IP13_9_7, CAN_DEBUGOUT7), 1551 PINMUX_IPSR_DATA(IP13_9_7, CAN_DEBUGOUT7),
1553 PINMUX_IPSR_MODSEL_DATA(IP13_12_10, SSI_SDATA6, SEL_SSI6_0), 1552 PINMUX_IPSR_MSEL(IP13_12_10, SSI_SDATA6, SEL_SSI6_0),
1554 PINMUX_IPSR_MODSEL_DATA(IP13_12_10, FMIN_D, SEL_FM_3), 1553 PINMUX_IPSR_MSEL(IP13_12_10, FMIN_D, SEL_FM_3),
1555 PINMUX_IPSR_DATA(IP13_12_10, DU2_DR5), 1554 PINMUX_IPSR_DATA(IP13_12_10, DU2_DR5),
1556 PINMUX_IPSR_DATA(IP13_12_10, LCDOUT5), 1555 PINMUX_IPSR_DATA(IP13_12_10, LCDOUT5),
1557 PINMUX_IPSR_DATA(IP13_12_10, CAN_DEBUGOUT8), 1556 PINMUX_IPSR_DATA(IP13_12_10, CAN_DEBUGOUT8),
1558 PINMUX_IPSR_MODSEL_DATA(IP13_15_13, SSI_SCK78, SEL_SSI7_0), 1557 PINMUX_IPSR_MSEL(IP13_15_13, SSI_SCK78, SEL_SSI7_0),
1559 PINMUX_IPSR_MODSEL_DATA(IP13_15_13, STP_IVCXO27_1, SEL_SSP_0), 1558 PINMUX_IPSR_MSEL(IP13_15_13, STP_IVCXO27_1, SEL_SSP_0),
1560 PINMUX_IPSR_MODSEL_DATA(IP13_15_13, SCK1, SEL_SCIF1_0), 1559 PINMUX_IPSR_MSEL(IP13_15_13, SCK1, SEL_SCIF1_0),
1561 PINMUX_IPSR_MODSEL_DATA(IP13_15_13, SCIFA1_SCK, SEL_SCIFA1_0), 1560 PINMUX_IPSR_MSEL(IP13_15_13, SCIFA1_SCK, SEL_SCIFA1_0),
1562 PINMUX_IPSR_DATA(IP13_15_13, DU2_DR6), 1561 PINMUX_IPSR_DATA(IP13_15_13, DU2_DR6),
1563 PINMUX_IPSR_DATA(IP13_15_13, LCDOUT6), 1562 PINMUX_IPSR_DATA(IP13_15_13, LCDOUT6),
1564 PINMUX_IPSR_DATA(IP13_15_13, CAN_DEBUGOUT9), 1563 PINMUX_IPSR_DATA(IP13_15_13, CAN_DEBUGOUT9),
1565 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SSI_WS78, SEL_SSI7_0), 1564 PINMUX_IPSR_MSEL(IP13_18_16, SSI_WS78, SEL_SSI7_0),
1566 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, STP_ISCLK_1, SEL_SSP_0), 1565 PINMUX_IPSR_MSEL(IP13_18_16, STP_ISCLK_1, SEL_SSP_0),
1567 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SCIFB2_SCK, SEL_SCIFB2_0), 1566 PINMUX_IPSR_MSEL(IP13_18_16, SCIFB2_SCK, SEL_SCIFB2_0),
1568 PINMUX_IPSR_DATA(IP13_18_16, SCIFA2_CTS_N), 1567 PINMUX_IPSR_DATA(IP13_18_16, SCIFA2_CTS_N),
1569 PINMUX_IPSR_DATA(IP13_18_16, DU2_DR7), 1568 PINMUX_IPSR_DATA(IP13_18_16, DU2_DR7),
1570 PINMUX_IPSR_DATA(IP13_18_16, LCDOUT7), 1569 PINMUX_IPSR_DATA(IP13_18_16, LCDOUT7),
1571 PINMUX_IPSR_DATA(IP13_18_16, CAN_DEBUGOUT10), 1570 PINMUX_IPSR_DATA(IP13_18_16, CAN_DEBUGOUT10),
1572 PINMUX_IPSR_MODSEL_DATA(IP13_22_19, SSI_SDATA7, SEL_SSI7_0), 1571 PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7, SEL_SSI7_0),
1573 PINMUX_IPSR_MODSEL_DATA(IP13_22_19, STP_ISD_1, SEL_SSP_0), 1572 PINMUX_IPSR_MSEL(IP13_22_19, STP_ISD_1, SEL_SSP_0),
1574 PINMUX_IPSR_MODSEL_DATA(IP13_22_19, SCIFB2_RXD, SEL_SCIFB2_0), 1573 PINMUX_IPSR_MSEL(IP13_22_19, SCIFB2_RXD, SEL_SCIFB2_0),
1575 PINMUX_IPSR_DATA(IP13_22_19, SCIFA2_RTS_N), 1574 PINMUX_IPSR_DATA(IP13_22_19, SCIFA2_RTS_N),
1576 PINMUX_IPSR_DATA(IP13_22_19, TCLK2), 1575 PINMUX_IPSR_DATA(IP13_22_19, TCLK2),
1577 PINMUX_IPSR_DATA(IP13_22_19, QSTVA_QVS), 1576 PINMUX_IPSR_DATA(IP13_22_19, QSTVA_QVS),
1578 PINMUX_IPSR_DATA(IP13_22_19, CAN_DEBUGOUT11), 1577 PINMUX_IPSR_DATA(IP13_22_19, CAN_DEBUGOUT11),
1579 PINMUX_IPSR_MODSEL_DATA(IP13_22_19, BPFCLK_E, SEL_FM_4), 1578 PINMUX_IPSR_MSEL(IP13_22_19, BPFCLK_E, SEL_FM_4),
1580 PINMUX_IPSR_MODSEL_DATA(IP13_22_19, SSI_SDATA7_B, SEL_SSI7_1), 1579 PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7_B, SEL_SSI7_1),
1581 PINMUX_IPSR_MODSEL_DATA(IP13_22_19, FMIN_G, SEL_FM_6), 1580 PINMUX_IPSR_MSEL(IP13_22_19, FMIN_G, SEL_FM_6),
1582 PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SSI_SDATA8, SEL_SSI8_0), 1581 PINMUX_IPSR_MSEL(IP13_25_23, SSI_SDATA8, SEL_SSI8_0),
1583 PINMUX_IPSR_MODSEL_DATA(IP13_25_23, STP_ISEN_1, SEL_SSP_0), 1582 PINMUX_IPSR_MSEL(IP13_25_23, STP_ISEN_1, SEL_SSP_0),
1584 PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SCIFB2_TXD, SEL_SCIFB2_0), 1583 PINMUX_IPSR_MSEL(IP13_25_23, SCIFB2_TXD, SEL_SCIFB2_0),
1585 PINMUX_IPSR_MODSEL_DATA(IP13_25_23, CAN0_TX_C, SEL_CAN0_2), 1584 PINMUX_IPSR_MSEL(IP13_25_23, CAN0_TX_C, SEL_CAN0_2),
1586 PINMUX_IPSR_DATA(IP13_25_23, CAN_DEBUGOUT12), 1585 PINMUX_IPSR_DATA(IP13_25_23, CAN_DEBUGOUT12),
1587 PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SSI_SDATA8_B, SEL_SSI8_1), 1586 PINMUX_IPSR_MSEL(IP13_25_23, SSI_SDATA8_B, SEL_SSI8_1),
1588 PINMUX_IPSR_DATA(IP13_28_26, SSI_SDATA9), 1587 PINMUX_IPSR_DATA(IP13_28_26, SSI_SDATA9),
1589 PINMUX_IPSR_MODSEL_DATA(IP13_28_26, STP_ISSYNC_1, SEL_SSP_0), 1588 PINMUX_IPSR_MSEL(IP13_28_26, STP_ISSYNC_1, SEL_SSP_0),
1590 PINMUX_IPSR_MODSEL_DATA(IP13_28_26, SCIFB2_CTS_N, SEL_SCIFB2_0), 1589 PINMUX_IPSR_MSEL(IP13_28_26, SCIFB2_CTS_N, SEL_SCIFB2_0),
1591 PINMUX_IPSR_DATA(IP13_28_26, SSI_WS1), 1590 PINMUX_IPSR_DATA(IP13_28_26, SSI_WS1),
1592 PINMUX_IPSR_MODSEL_DATA(IP13_28_26, SSI_SDATA5_C, SEL_SSI5_2), 1591 PINMUX_IPSR_MSEL(IP13_28_26, SSI_SDATA5_C, SEL_SSI5_2),
1593 PINMUX_IPSR_DATA(IP13_28_26, CAN_DEBUGOUT13), 1592 PINMUX_IPSR_DATA(IP13_28_26, CAN_DEBUGOUT13),
1594 PINMUX_IPSR_DATA(IP13_30_29, AUDIO_CLKA), 1593 PINMUX_IPSR_DATA(IP13_30_29, AUDIO_CLKA),
1595 PINMUX_IPSR_MODSEL_DATA(IP13_30_29, SCIFB2_RTS_N, SEL_SCIFB2_0), 1594 PINMUX_IPSR_MSEL(IP13_30_29, SCIFB2_RTS_N, SEL_SCIFB2_0),
1596 PINMUX_IPSR_DATA(IP13_30_29, CAN_DEBUGOUT14), 1595 PINMUX_IPSR_DATA(IP13_30_29, CAN_DEBUGOUT14),
1597 1596
1598 PINMUX_IPSR_DATA(IP14_2_0, AUDIO_CLKB), 1597 PINMUX_IPSR_DATA(IP14_2_0, AUDIO_CLKB),
1599 PINMUX_IPSR_MODSEL_DATA(IP14_2_0, SCIF_CLK, SEL_SCIFCLK_0), 1598 PINMUX_IPSR_MSEL(IP14_2_0, SCIF_CLK, SEL_SCIFCLK_0),
1600 PINMUX_IPSR_MODSEL_DATA(IP14_2_0, CAN0_RX_D, SEL_CAN0_3), 1599 PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_D, SEL_CAN0_3),
1601 PINMUX_IPSR_DATA(IP14_2_0, DVC_MUTE), 1600 PINMUX_IPSR_DATA(IP14_2_0, DVC_MUTE),
1602 PINMUX_IPSR_MODSEL_DATA(IP14_2_0, CAN0_RX_C, SEL_CAN0_2), 1601 PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_C, SEL_CAN0_2),
1603 PINMUX_IPSR_DATA(IP14_2_0, CAN_DEBUGOUT15), 1602 PINMUX_IPSR_DATA(IP14_2_0, CAN_DEBUGOUT15),
1604 PINMUX_IPSR_DATA(IP14_2_0, REMOCON), 1603 PINMUX_IPSR_DATA(IP14_2_0, REMOCON),
1605 PINMUX_IPSR_MODSEL_DATA(IP14_5_3, SCIFA0_SCK, SEL_SCFA_0), 1604 PINMUX_IPSR_MSEL(IP14_5_3, SCIFA0_SCK, SEL_SCFA_0),
1606 PINMUX_IPSR_MODSEL_DATA(IP14_5_3, HSCK1, SEL_HSCIF1_0), 1605 PINMUX_IPSR_MSEL(IP14_5_3, HSCK1, SEL_HSCIF1_0),
1607 PINMUX_IPSR_DATA(IP14_5_3, SCK0), 1606 PINMUX_IPSR_DATA(IP14_5_3, SCK0),
1608 PINMUX_IPSR_DATA(IP14_5_3, MSIOF3_SS2), 1607 PINMUX_IPSR_DATA(IP14_5_3, MSIOF3_SS2),
1609 PINMUX_IPSR_DATA(IP14_5_3, DU2_DG2), 1608 PINMUX_IPSR_DATA(IP14_5_3, DU2_DG2),
1610 PINMUX_IPSR_DATA(IP14_5_3, LCDOUT10), 1609 PINMUX_IPSR_DATA(IP14_5_3, LCDOUT10),
1611 PINMUX_IPSR_MODSEL_DATA(IP14_5_3, IIC1_SDA_C, SEL_IIC1_2), 1610 PINMUX_IPSR_MSEL(IP14_5_3, IIC1_SDA_C, SEL_IIC1_2),
1612 PINMUX_IPSR_MODSEL_DATA(IP14_5_3, I2C1_SDA_C, SEL_I2C1_2), 1611 PINMUX_IPSR_MSEL(IP14_5_3, I2C1_SDA_C, SEL_I2C1_2),
1613 PINMUX_IPSR_MODSEL_DATA(IP14_8_6, SCIFA0_RXD, SEL_SCFA_0), 1612 PINMUX_IPSR_MSEL(IP14_8_6, SCIFA0_RXD, SEL_SCFA_0),
1614 PINMUX_IPSR_MODSEL_DATA(IP14_8_6, HRX1, SEL_HSCIF1_0), 1613 PINMUX_IPSR_MSEL(IP14_8_6, HRX1, SEL_HSCIF1_0),
1615 PINMUX_IPSR_MODSEL_DATA(IP14_8_6, RX0, SEL_SCIF0_0), 1614 PINMUX_IPSR_MSEL(IP14_8_6, RX0, SEL_SCIF0_0),
1616 PINMUX_IPSR_DATA(IP14_8_6, DU2_DR0), 1615 PINMUX_IPSR_DATA(IP14_8_6, DU2_DR0),
1617 PINMUX_IPSR_DATA(IP14_8_6, LCDOUT0), 1616 PINMUX_IPSR_DATA(IP14_8_6, LCDOUT0),
1618 PINMUX_IPSR_MODSEL_DATA(IP14_11_9, SCIFA0_TXD, SEL_SCFA_0), 1617 PINMUX_IPSR_MSEL(IP14_11_9, SCIFA0_TXD, SEL_SCFA_0),
1619 PINMUX_IPSR_MODSEL_DATA(IP14_11_9, HTX1, SEL_HSCIF1_0), 1618 PINMUX_IPSR_MSEL(IP14_11_9, HTX1, SEL_HSCIF1_0),
1620 PINMUX_IPSR_MODSEL_DATA(IP14_11_9, TX0, SEL_SCIF0_0), 1619 PINMUX_IPSR_MSEL(IP14_11_9, TX0, SEL_SCIF0_0),
1621 PINMUX_IPSR_DATA(IP14_11_9, DU2_DR1), 1620 PINMUX_IPSR_DATA(IP14_11_9, DU2_DR1),
1622 PINMUX_IPSR_DATA(IP14_11_9, LCDOUT1), 1621 PINMUX_IPSR_DATA(IP14_11_9, LCDOUT1),
1623 PINMUX_IPSR_MODSEL_DATA(IP14_15_12, SCIFA0_CTS_N, SEL_SCFA_0), 1622 PINMUX_IPSR_MSEL(IP14_15_12, SCIFA0_CTS_N, SEL_SCFA_0),
1624 PINMUX_IPSR_MODSEL_DATA(IP14_15_12, HCTS1_N, SEL_HSCIF1_0), 1623 PINMUX_IPSR_MSEL(IP14_15_12, HCTS1_N, SEL_HSCIF1_0),
1625 PINMUX_IPSR_DATA(IP14_15_12, CTS0_N), 1624 PINMUX_IPSR_DATA(IP14_15_12, CTS0_N),
1626 PINMUX_IPSR_MODSEL_DATA(IP14_15_12, MSIOF3_SYNC, SEL_SOF3_0), 1625 PINMUX_IPSR_MSEL(IP14_15_12, MSIOF3_SYNC, SEL_SOF3_0),
1627 PINMUX_IPSR_DATA(IP14_15_12, DU2_DG3), 1626 PINMUX_IPSR_DATA(IP14_15_12, DU2_DG3),
1628 PINMUX_IPSR_DATA(IP14_15_12, LCDOUT11), 1627 PINMUX_IPSR_DATA(IP14_15_12, LCDOUT11),
1629 PINMUX_IPSR_DATA(IP14_15_12, PWM0_B), 1628 PINMUX_IPSR_DATA(IP14_15_12, PWM0_B),
1630 PINMUX_IPSR_MODSEL_DATA(IP14_15_12, IIC1_SCL_C, SEL_IIC1_2), 1629 PINMUX_IPSR_MSEL(IP14_15_12, IIC1_SCL_C, SEL_IIC1_2),
1631 PINMUX_IPSR_MODSEL_DATA(IP14_15_12, I2C1_SCL_C, SEL_I2C1_2), 1630 PINMUX_IPSR_MSEL(IP14_15_12, I2C1_SCL_C, SEL_I2C1_2),
1632 PINMUX_IPSR_MODSEL_DATA(IP14_18_16, SCIFA0_RTS_N, SEL_SCFA_0), 1631 PINMUX_IPSR_MSEL(IP14_18_16, SCIFA0_RTS_N, SEL_SCFA_0),
1633 PINMUX_IPSR_MODSEL_DATA(IP14_18_16, HRTS1_N, SEL_HSCIF1_0), 1632 PINMUX_IPSR_MSEL(IP14_18_16, HRTS1_N, SEL_HSCIF1_0),
1634 PINMUX_IPSR_DATA(IP14_18_16, RTS0_N), 1633 PINMUX_IPSR_DATA(IP14_18_16, RTS0_N),
1635 PINMUX_IPSR_DATA(IP14_18_16, MSIOF3_SS1), 1634 PINMUX_IPSR_DATA(IP14_18_16, MSIOF3_SS1),
1636 PINMUX_IPSR_DATA(IP14_18_16, DU2_DG0), 1635 PINMUX_IPSR_DATA(IP14_18_16, DU2_DG0),
1637 PINMUX_IPSR_DATA(IP14_18_16, LCDOUT8), 1636 PINMUX_IPSR_DATA(IP14_18_16, LCDOUT8),
1638 PINMUX_IPSR_DATA(IP14_18_16, PWM1_B), 1637 PINMUX_IPSR_DATA(IP14_18_16, PWM1_B),
1639 PINMUX_IPSR_MODSEL_DATA(IP14_21_19, SCIFA1_RXD, SEL_SCIFA1_0), 1638 PINMUX_IPSR_MSEL(IP14_21_19, SCIFA1_RXD, SEL_SCIFA1_0),
1640 PINMUX_IPSR_MODSEL_DATA(IP14_21_19, AD_DI, SEL_ADI_0), 1639 PINMUX_IPSR_MSEL(IP14_21_19, AD_DI, SEL_ADI_0),
1641 PINMUX_IPSR_MODSEL_DATA(IP14_21_19, RX1, SEL_SCIF1_0), 1640 PINMUX_IPSR_MSEL(IP14_21_19, RX1, SEL_SCIF1_0),
1642 PINMUX_IPSR_DATA(IP14_21_19, DU2_EXODDF_DU2_ODDF_DISP_CDE), 1641 PINMUX_IPSR_DATA(IP14_21_19, DU2_EXODDF_DU2_ODDF_DISP_CDE),
1643 PINMUX_IPSR_DATA(IP14_21_19, QCPV_QDE), 1642 PINMUX_IPSR_DATA(IP14_21_19, QCPV_QDE),
1644 PINMUX_IPSR_MODSEL_DATA(IP14_24_22, SCIFA1_TXD, SEL_SCIFA1_0), 1643 PINMUX_IPSR_MSEL(IP14_24_22, SCIFA1_TXD, SEL_SCIFA1_0),
1645 PINMUX_IPSR_MODSEL_DATA(IP14_24_22, AD_DO, SEL_ADI_0), 1644 PINMUX_IPSR_MSEL(IP14_24_22, AD_DO, SEL_ADI_0),
1646 PINMUX_IPSR_MODSEL_DATA(IP14_24_22, TX1, SEL_SCIF1_0), 1645 PINMUX_IPSR_MSEL(IP14_24_22, TX1, SEL_SCIF1_0),
1647 PINMUX_IPSR_DATA(IP14_24_22, DU2_DG1), 1646 PINMUX_IPSR_DATA(IP14_24_22, DU2_DG1),
1648 PINMUX_IPSR_DATA(IP14_24_22, LCDOUT9), 1647 PINMUX_IPSR_DATA(IP14_24_22, LCDOUT9),
1649 PINMUX_IPSR_MODSEL_DATA(IP14_27_25, SCIFA1_CTS_N, SEL_SCIFA1_0), 1648 PINMUX_IPSR_MSEL(IP14_27_25, SCIFA1_CTS_N, SEL_SCIFA1_0),
1650 PINMUX_IPSR_MODSEL_DATA(IP14_27_25, AD_CLK, SEL_ADI_0), 1649 PINMUX_IPSR_MSEL(IP14_27_25, AD_CLK, SEL_ADI_0),
1651 PINMUX_IPSR_DATA(IP14_27_25, CTS1_N), 1650 PINMUX_IPSR_DATA(IP14_27_25, CTS1_N),
1652 PINMUX_IPSR_MODSEL_DATA(IP14_27_25, MSIOF3_RXD, SEL_SOF3_0), 1651 PINMUX_IPSR_MSEL(IP14_27_25, MSIOF3_RXD, SEL_SOF3_0),
1653 PINMUX_IPSR_DATA(IP14_27_25, DU0_DOTCLKOUT), 1652 PINMUX_IPSR_DATA(IP14_27_25, DU0_DOTCLKOUT),
1654 PINMUX_IPSR_DATA(IP14_27_25, QCLK), 1653 PINMUX_IPSR_DATA(IP14_27_25, QCLK),
1655 PINMUX_IPSR_MODSEL_DATA(IP14_30_28, SCIFA1_RTS_N, SEL_SCIFA1_0), 1654 PINMUX_IPSR_MSEL(IP14_30_28, SCIFA1_RTS_N, SEL_SCIFA1_0),
1656 PINMUX_IPSR_MODSEL_DATA(IP14_30_28, AD_NCS_N, SEL_ADI_0), 1655 PINMUX_IPSR_MSEL(IP14_30_28, AD_NCS_N, SEL_ADI_0),
1657 PINMUX_IPSR_DATA(IP14_30_28, RTS1_N), 1656 PINMUX_IPSR_DATA(IP14_30_28, RTS1_N),
1658 PINMUX_IPSR_MODSEL_DATA(IP14_30_28, MSIOF3_TXD, SEL_SOF3_0), 1657 PINMUX_IPSR_MSEL(IP14_30_28, MSIOF3_TXD, SEL_SOF3_0),
1659 PINMUX_IPSR_DATA(IP14_30_28, DU1_DOTCLKOUT), 1658 PINMUX_IPSR_DATA(IP14_30_28, DU1_DOTCLKOUT),
1660 PINMUX_IPSR_DATA(IP14_30_28, QSTVB_QVE), 1659 PINMUX_IPSR_DATA(IP14_30_28, QSTVB_QVE),
1661 PINMUX_IPSR_MODSEL_DATA(IP14_30_28, HRTS0_N_C, SEL_HSCIF0_2), 1660 PINMUX_IPSR_MSEL(IP14_30_28, HRTS0_N_C, SEL_HSCIF0_2),
1662 1661
1663 PINMUX_IPSR_MODSEL_DATA(IP15_2_0, SCIFA2_SCK, SEL_SCIFA2_0), 1662 PINMUX_IPSR_MSEL(IP15_2_0, SCIFA2_SCK, SEL_SCIFA2_0),
1664 PINMUX_IPSR_MODSEL_DATA(IP15_2_0, FMCLK, SEL_FM_0), 1663 PINMUX_IPSR_MSEL(IP15_2_0, FMCLK, SEL_FM_0),
1665 PINMUX_IPSR_DATA(IP15_2_0, SCK2), 1664 PINMUX_IPSR_DATA(IP15_2_0, SCK2),
1666 PINMUX_IPSR_MODSEL_DATA(IP15_2_0, MSIOF3_SCK, SEL_SOF3_0), 1665 PINMUX_IPSR_MSEL(IP15_2_0, MSIOF3_SCK, SEL_SOF3_0),
1667 PINMUX_IPSR_DATA(IP15_2_0, DU2_DG7), 1666 PINMUX_IPSR_DATA(IP15_2_0, DU2_DG7),
1668 PINMUX_IPSR_DATA(IP15_2_0, LCDOUT15), 1667 PINMUX_IPSR_DATA(IP15_2_0, LCDOUT15),
1669 PINMUX_IPSR_MODSEL_DATA(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_1), 1668 PINMUX_IPSR_MSEL(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_1),
1670 PINMUX_IPSR_MODSEL_DATA(IP15_5_3, SCIFA2_RXD, SEL_SCIFA2_0), 1669 PINMUX_IPSR_MSEL(IP15_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
1671 PINMUX_IPSR_MODSEL_DATA(IP15_5_3, FMIN, SEL_FM_0), 1670 PINMUX_IPSR_MSEL(IP15_5_3, FMIN, SEL_FM_0),
1672 PINMUX_IPSR_MODSEL_DATA(IP15_5_3, TX2, SEL_SCIF2_0), 1671 PINMUX_IPSR_MSEL(IP15_5_3, TX2, SEL_SCIF2_0),
1673 PINMUX_IPSR_DATA(IP15_5_3, DU2_DB0), 1672 PINMUX_IPSR_DATA(IP15_5_3, DU2_DB0),
1674 PINMUX_IPSR_DATA(IP15_5_3, LCDOUT16), 1673 PINMUX_IPSR_DATA(IP15_5_3, LCDOUT16),
1675 PINMUX_IPSR_MODSEL_DATA(IP15_5_3, IIC2_SCL, SEL_IIC2_0), 1674 PINMUX_IPSR_MSEL(IP15_5_3, IIC2_SCL, SEL_IIC2_0),
1676 PINMUX_IPSR_MODSEL_DATA(IP15_5_3, I2C2_SCL, SEL_I2C2_0), 1675 PINMUX_IPSR_MSEL(IP15_5_3, I2C2_SCL, SEL_I2C2_0),
1677 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SCIFA2_TXD, SEL_SCIFA2_0), 1676 PINMUX_IPSR_MSEL(IP15_8_6, SCIFA2_TXD, SEL_SCIFA2_0),
1678 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, BPFCLK, SEL_FM_0), 1677 PINMUX_IPSR_MSEL(IP15_8_6, BPFCLK, SEL_FM_0),
1679 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, RX2, SEL_SCIF2_0), 1678 PINMUX_IPSR_MSEL(IP15_8_6, RX2, SEL_SCIF2_0),
1680 PINMUX_IPSR_DATA(IP15_8_6, DU2_DB1), 1679 PINMUX_IPSR_DATA(IP15_8_6, DU2_DB1),
1681 PINMUX_IPSR_DATA(IP15_8_6, LCDOUT17), 1680 PINMUX_IPSR_DATA(IP15_8_6, LCDOUT17),
1682 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, IIC2_SDA, SEL_IIC2_0), 1681 PINMUX_IPSR_MSEL(IP15_8_6, IIC2_SDA, SEL_IIC2_0),
1683 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, I2C2_SDA, SEL_I2C2_0), 1682 PINMUX_IPSR_MSEL(IP15_8_6, I2C2_SDA, SEL_I2C2_0),
1684 PINMUX_IPSR_DATA(IP15_11_9, HSCK0), 1683 PINMUX_IPSR_DATA(IP15_11_9, HSCK0),
1685 PINMUX_IPSR_MODSEL_DATA(IP15_11_9, TS_SDEN0, SEL_TSIF0_0), 1684 PINMUX_IPSR_MSEL(IP15_11_9, TS_SDEN0, SEL_TSIF0_0),
1686 PINMUX_IPSR_DATA(IP15_11_9, DU2_DG4), 1685 PINMUX_IPSR_DATA(IP15_11_9, DU2_DG4),
1687 PINMUX_IPSR_DATA(IP15_11_9, LCDOUT12), 1686 PINMUX_IPSR_DATA(IP15_11_9, LCDOUT12),
1688 PINMUX_IPSR_MODSEL_DATA(IP15_11_9, HCTS0_N_C, SEL_HSCIF0_2), 1687 PINMUX_IPSR_MSEL(IP15_11_9, HCTS0_N_C, SEL_HSCIF0_2),
1689 PINMUX_IPSR_MODSEL_DATA(IP15_13_12, HRX0, SEL_HSCIF0_0), 1688 PINMUX_IPSR_MSEL(IP15_13_12, HRX0, SEL_HSCIF0_0),
1690 PINMUX_IPSR_DATA(IP15_13_12, DU2_DB2), 1689 PINMUX_IPSR_DATA(IP15_13_12, DU2_DB2),
1691 PINMUX_IPSR_DATA(IP15_13_12, LCDOUT18), 1690 PINMUX_IPSR_DATA(IP15_13_12, LCDOUT18),
1692 PINMUX_IPSR_MODSEL_DATA(IP15_15_14, HTX0, SEL_HSCIF0_0), 1691 PINMUX_IPSR_MSEL(IP15_15_14, HTX0, SEL_HSCIF0_0),
1693 PINMUX_IPSR_DATA(IP15_15_14, DU2_DB3), 1692 PINMUX_IPSR_DATA(IP15_15_14, DU2_DB3),
1694 PINMUX_IPSR_DATA(IP15_15_14, LCDOUT19), 1693 PINMUX_IPSR_DATA(IP15_15_14, LCDOUT19),
1695 PINMUX_IPSR_MODSEL_DATA(IP15_17_16, HCTS0_N, SEL_HSCIF0_0), 1694 PINMUX_IPSR_MSEL(IP15_17_16, HCTS0_N, SEL_HSCIF0_0),
1696 PINMUX_IPSR_DATA(IP15_17_16, SSI_SCK9), 1695 PINMUX_IPSR_DATA(IP15_17_16, SSI_SCK9),
1697 PINMUX_IPSR_DATA(IP15_17_16, DU2_DB4), 1696 PINMUX_IPSR_DATA(IP15_17_16, DU2_DB4),
1698 PINMUX_IPSR_DATA(IP15_17_16, LCDOUT20), 1697 PINMUX_IPSR_DATA(IP15_17_16, LCDOUT20),
1699 PINMUX_IPSR_MODSEL_DATA(IP15_19_18, HRTS0_N, SEL_HSCIF0_0), 1698 PINMUX_IPSR_MSEL(IP15_19_18, HRTS0_N, SEL_HSCIF0_0),
1700 PINMUX_IPSR_DATA(IP15_19_18, SSI_WS9), 1699 PINMUX_IPSR_DATA(IP15_19_18, SSI_WS9),
1701 PINMUX_IPSR_DATA(IP15_19_18, DU2_DB5), 1700 PINMUX_IPSR_DATA(IP15_19_18, DU2_DB5),
1702 PINMUX_IPSR_DATA(IP15_19_18, LCDOUT21), 1701 PINMUX_IPSR_DATA(IP15_19_18, LCDOUT21),
1703 PINMUX_IPSR_MODSEL_DATA(IP15_22_20, MSIOF0_SCK, SEL_SOF0_0), 1702 PINMUX_IPSR_MSEL(IP15_22_20, MSIOF0_SCK, SEL_SOF0_0),
1704 PINMUX_IPSR_MODSEL_DATA(IP15_22_20, TS_SDAT0, SEL_TSIF0_0), 1703 PINMUX_IPSR_MSEL(IP15_22_20, TS_SDAT0, SEL_TSIF0_0),
1705 PINMUX_IPSR_DATA(IP15_22_20, ADICLK), 1704 PINMUX_IPSR_DATA(IP15_22_20, ADICLK),
1706 PINMUX_IPSR_DATA(IP15_22_20, DU2_DB6), 1705 PINMUX_IPSR_DATA(IP15_22_20, DU2_DB6),
1707 PINMUX_IPSR_DATA(IP15_22_20, LCDOUT22), 1706 PINMUX_IPSR_DATA(IP15_22_20, LCDOUT22),
1708 PINMUX_IPSR_DATA(IP15_25_23, MSIOF0_SYNC), 1707 PINMUX_IPSR_DATA(IP15_25_23, MSIOF0_SYNC),
1709 PINMUX_IPSR_MODSEL_DATA(IP15_25_23, TS_SCK0, SEL_TSIF0_0), 1708 PINMUX_IPSR_MSEL(IP15_25_23, TS_SCK0, SEL_TSIF0_0),
1710 PINMUX_IPSR_DATA(IP15_25_23, SSI_SCK2), 1709 PINMUX_IPSR_DATA(IP15_25_23, SSI_SCK2),
1711 PINMUX_IPSR_DATA(IP15_25_23, ADIDATA), 1710 PINMUX_IPSR_DATA(IP15_25_23, ADIDATA),
1712 PINMUX_IPSR_DATA(IP15_25_23, DU2_DB7), 1711 PINMUX_IPSR_DATA(IP15_25_23, DU2_DB7),
1713 PINMUX_IPSR_DATA(IP15_25_23, LCDOUT23), 1712 PINMUX_IPSR_DATA(IP15_25_23, LCDOUT23),
1714 PINMUX_IPSR_MODSEL_DATA(IP15_25_23, HRX0_C, SEL_SCIFA2_1), 1713 PINMUX_IPSR_MSEL(IP15_25_23, HRX0_C, SEL_SCIFA2_1),
1715 PINMUX_IPSR_MODSEL_DATA(IP15_27_26, MSIOF0_SS1, SEL_SOF0_0), 1714 PINMUX_IPSR_MSEL(IP15_27_26, MSIOF0_SS1, SEL_SOF0_0),
1716 PINMUX_IPSR_DATA(IP15_27_26, ADICHS0), 1715 PINMUX_IPSR_DATA(IP15_27_26, ADICHS0),
1717 PINMUX_IPSR_DATA(IP15_27_26, DU2_DG5), 1716 PINMUX_IPSR_DATA(IP15_27_26, DU2_DG5),
1718 PINMUX_IPSR_DATA(IP15_27_26, LCDOUT13), 1717 PINMUX_IPSR_DATA(IP15_27_26, LCDOUT13),
1719 PINMUX_IPSR_MODSEL_DATA(IP15_29_28, MSIOF0_TXD, SEL_SOF0_0), 1718 PINMUX_IPSR_MSEL(IP15_29_28, MSIOF0_TXD, SEL_SOF0_0),
1720 PINMUX_IPSR_DATA(IP15_29_28, ADICHS1), 1719 PINMUX_IPSR_DATA(IP15_29_28, ADICHS1),
1721 PINMUX_IPSR_DATA(IP15_29_28, DU2_DG6), 1720 PINMUX_IPSR_DATA(IP15_29_28, DU2_DG6),
1722 PINMUX_IPSR_DATA(IP15_29_28, LCDOUT14), 1721 PINMUX_IPSR_DATA(IP15_29_28, LCDOUT14),
1723 1722
1724 PINMUX_IPSR_MODSEL_DATA(IP16_2_0, MSIOF0_SS2, SEL_SOF0_0), 1723 PINMUX_IPSR_MSEL(IP16_2_0, MSIOF0_SS2, SEL_SOF0_0),
1725 PINMUX_IPSR_DATA(IP16_2_0, AUDIO_CLKOUT), 1724 PINMUX_IPSR_DATA(IP16_2_0, AUDIO_CLKOUT),
1726 PINMUX_IPSR_DATA(IP16_2_0, ADICHS2), 1725 PINMUX_IPSR_DATA(IP16_2_0, ADICHS2),
1727 PINMUX_IPSR_DATA(IP16_2_0, DU2_DISP), 1726 PINMUX_IPSR_DATA(IP16_2_0, DU2_DISP),
1728 PINMUX_IPSR_DATA(IP16_2_0, QPOLA), 1727 PINMUX_IPSR_DATA(IP16_2_0, QPOLA),
1729 PINMUX_IPSR_MODSEL_DATA(IP16_2_0, HTX0_C, SEL_HSCIF0_2), 1728 PINMUX_IPSR_MSEL(IP16_2_0, HTX0_C, SEL_HSCIF0_2),
1730 PINMUX_IPSR_MODSEL_DATA(IP16_2_0, SCIFA2_TXD_B, SEL_SCIFA2_1), 1729 PINMUX_IPSR_MSEL(IP16_2_0, SCIFA2_TXD_B, SEL_SCIFA2_1),
1731 PINMUX_IPSR_MODSEL_DATA(IP16_5_3, MSIOF0_RXD, SEL_SOF0_0), 1730 PINMUX_IPSR_MSEL(IP16_5_3, MSIOF0_RXD, SEL_SOF0_0),
1732 PINMUX_IPSR_MODSEL_DATA(IP16_5_3, TS_SPSYNC0, SEL_TSIF0_0), 1731 PINMUX_IPSR_MSEL(IP16_5_3, TS_SPSYNC0, SEL_TSIF0_0),
1733 PINMUX_IPSR_DATA(IP16_5_3, SSI_WS2), 1732 PINMUX_IPSR_DATA(IP16_5_3, SSI_WS2),
1734 PINMUX_IPSR_DATA(IP16_5_3, ADICS_SAMP), 1733 PINMUX_IPSR_DATA(IP16_5_3, ADICS_SAMP),
1735 PINMUX_IPSR_DATA(IP16_5_3, DU2_CDE), 1734 PINMUX_IPSR_DATA(IP16_5_3, DU2_CDE),
1736 PINMUX_IPSR_DATA(IP16_5_3, QPOLB), 1735 PINMUX_IPSR_DATA(IP16_5_3, QPOLB),
1737 PINMUX_IPSR_MODSEL_DATA(IP16_5_3, SCIFA2_RXD_B, SEL_HSCIF0_2), 1736 PINMUX_IPSR_MSEL(IP16_5_3, SCIFA2_RXD_B, SEL_HSCIF0_2),
1738 PINMUX_IPSR_DATA(IP16_6, USB1_PWEN), 1737 PINMUX_IPSR_DATA(IP16_6, USB1_PWEN),
1739 PINMUX_IPSR_DATA(IP16_6, AUDIO_CLKOUT_D), 1738 PINMUX_IPSR_DATA(IP16_6, AUDIO_CLKOUT_D),
1740 PINMUX_IPSR_DATA(IP16_7, USB1_OVC), 1739 PINMUX_IPSR_DATA(IP16_7, USB1_OVC),
1741 PINMUX_IPSR_MODSEL_DATA(IP16_7, TCLK1_B, SEL_TMU1_1), 1740 PINMUX_IPSR_MSEL(IP16_7, TCLK1_B, SEL_TMU1_1),
1742 1741
1743 PINMUX_DATA(IIC0_SCL_MARK, FN_SEL_IIC0_0), 1742 PINMUX_DATA(IIC0_SCL_MARK, FN_SEL_IIC0_0),
1744 PINMUX_DATA(IIC0_SDA_MARK, FN_SEL_IIC0_0), 1743 PINMUX_DATA(IIC0_SDA_MARK, FN_SEL_IIC0_0),
@@ -3624,25 +3623,6 @@ static const unsigned int usb2_pins[] = {
 static const unsigned int usb2_mux[] = {
 	USB2_PWEN_MARK, USB2_OVC_MARK,
 };
-
-union vin_data {
-	unsigned int data24[24];
-	unsigned int data20[20];
-	unsigned int data16[16];
-	unsigned int data12[12];
-	unsigned int data10[10];
-	unsigned int data8[8];
-	unsigned int data4[4];
-};
-
-#define VIN_DATA_PIN_GROUP(n, s) \
-	{ \
-		.name = #n#s, \
-		.pins = n##_pins.data##s, \
-		.mux = n##_mux.data##s, \
-		.nr_pins = ARRAY_SIZE(n##_pins.data##s), \
-	}
-
 /* - VIN0 ------------------------------------------------------------------- */
 static const union vin_data vin0_data_pins = {
 	.data24 = {
@@ -5719,6 +5699,6 @@ const struct sh_pfc_soc_info r8a7790_pinmux_info = {
 
 	.cfg_regs = pinmux_config_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 25e8117f5a1a..87a4f44147c1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/platform_data/gpio-rcar.h>
 
 #include "core.h"
 #include "sh_pfc.h"
@@ -824,459 +823,459 @@ static const u16 pinmux_data[] = {
824 PINMUX_IPSR_DATA(IP0_14, D14), 823 PINMUX_IPSR_DATA(IP0_14, D14),
825 PINMUX_IPSR_DATA(IP0_15, D15), 824 PINMUX_IPSR_DATA(IP0_15, D15),
826 PINMUX_IPSR_DATA(IP0_18_16, A0), 825 PINMUX_IPSR_DATA(IP0_18_16, A0),
827 PINMUX_IPSR_MODSEL_DATA(IP0_18_16, ATAWR0_N_C, SEL_LBS_2), 826 PINMUX_IPSR_MSEL(IP0_18_16, ATAWR0_N_C, SEL_LBS_2),
828 PINMUX_IPSR_MODSEL_DATA(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1), 827 PINMUX_IPSR_MSEL(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1),
829 PINMUX_IPSR_MODSEL_DATA(IP0_18_16, SCL0_C, SEL_IIC0_2), 828 PINMUX_IPSR_MSEL(IP0_18_16, SCL0_C, SEL_IIC0_2),
830 PINMUX_IPSR_DATA(IP0_18_16, PWM2_B), 829 PINMUX_IPSR_DATA(IP0_18_16, PWM2_B),
831 PINMUX_IPSR_DATA(IP0_20_19, A1), 830 PINMUX_IPSR_DATA(IP0_20_19, A1),
832 PINMUX_IPSR_MODSEL_DATA(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1), 831 PINMUX_IPSR_MSEL(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1),
833 PINMUX_IPSR_DATA(IP0_22_21, A2), 832 PINMUX_IPSR_DATA(IP0_22_21, A2),
834 PINMUX_IPSR_MODSEL_DATA(IP0_22_21, MSIOF0_SS1_B, SEL_SOF0_1), 833 PINMUX_IPSR_MSEL(IP0_22_21, MSIOF0_SS1_B, SEL_SOF0_1),
835 PINMUX_IPSR_DATA(IP0_24_23, A3), 834 PINMUX_IPSR_DATA(IP0_24_23, A3),
836 PINMUX_IPSR_MODSEL_DATA(IP0_24_23, MSIOF0_SS2_B, SEL_SOF0_1), 835 PINMUX_IPSR_MSEL(IP0_24_23, MSIOF0_SS2_B, SEL_SOF0_1),
837 PINMUX_IPSR_DATA(IP0_26_25, A4), 836 PINMUX_IPSR_DATA(IP0_26_25, A4),
838 PINMUX_IPSR_MODSEL_DATA(IP0_26_25, MSIOF0_TXD_B, SEL_SOF0_1), 837 PINMUX_IPSR_MSEL(IP0_26_25, MSIOF0_TXD_B, SEL_SOF0_1),
839 PINMUX_IPSR_DATA(IP0_28_27, A5), 838 PINMUX_IPSR_DATA(IP0_28_27, A5),
840 PINMUX_IPSR_MODSEL_DATA(IP0_28_27, MSIOF0_RXD_B, SEL_SOF0_1), 839 PINMUX_IPSR_MSEL(IP0_28_27, MSIOF0_RXD_B, SEL_SOF0_1),
841 PINMUX_IPSR_DATA(IP0_30_29, A6), 840 PINMUX_IPSR_DATA(IP0_30_29, A6),
842 PINMUX_IPSR_MODSEL_DATA(IP0_30_29, MSIOF1_SCK, SEL_SOF1_0), 841 PINMUX_IPSR_MSEL(IP0_30_29, MSIOF1_SCK, SEL_SOF1_0),
843 842
844 /* IPSR1 */ 843 /* IPSR1 */
845 PINMUX_IPSR_DATA(IP1_1_0, A7), 844 PINMUX_IPSR_DATA(IP1_1_0, A7),
846 PINMUX_IPSR_MODSEL_DATA(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0), 845 PINMUX_IPSR_MSEL(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0),
847 PINMUX_IPSR_DATA(IP1_3_2, A8), 846 PINMUX_IPSR_DATA(IP1_3_2, A8),
848 PINMUX_IPSR_MODSEL_DATA(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0), 847 PINMUX_IPSR_MSEL(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0),
849 PINMUX_IPSR_MODSEL_DATA(IP1_3_2, SCL0, SEL_IIC0_0), 848 PINMUX_IPSR_MSEL(IP1_3_2, SCL0, SEL_IIC0_0),
850 PINMUX_IPSR_DATA(IP1_5_4, A9), 849 PINMUX_IPSR_DATA(IP1_5_4, A9),
851 PINMUX_IPSR_MODSEL_DATA(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0), 850 PINMUX_IPSR_MSEL(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0),
852 PINMUX_IPSR_MODSEL_DATA(IP1_5_4, SDA0, SEL_IIC0_0), 851 PINMUX_IPSR_MSEL(IP1_5_4, SDA0, SEL_IIC0_0),
853 PINMUX_IPSR_DATA(IP1_7_6, A10), 852 PINMUX_IPSR_DATA(IP1_7_6, A10),
854 PINMUX_IPSR_MODSEL_DATA(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0), 853 PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0),
855 PINMUX_IPSR_MODSEL_DATA(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3), 854 PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3),
856 PINMUX_IPSR_DATA(IP1_10_8, A11), 855 PINMUX_IPSR_DATA(IP1_10_8, A11),
857 PINMUX_IPSR_MODSEL_DATA(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0), 856 PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0),
858 PINMUX_IPSR_MODSEL_DATA(IP1_10_8, SCL3_D, SEL_IIC3_3), 857 PINMUX_IPSR_MSEL(IP1_10_8, SCL3_D, SEL_IIC3_3),
859 PINMUX_IPSR_MODSEL_DATA(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3), 858 PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3),
860 PINMUX_IPSR_DATA(IP1_13_11, A12), 859 PINMUX_IPSR_DATA(IP1_13_11, A12),
861 PINMUX_IPSR_MODSEL_DATA(IP1_13_11, FMCLK, SEL_FM_0), 860 PINMUX_IPSR_MSEL(IP1_13_11, FMCLK, SEL_FM_0),
862 PINMUX_IPSR_MODSEL_DATA(IP1_13_11, SDA3_D, SEL_IIC3_3), 861 PINMUX_IPSR_MSEL(IP1_13_11, SDA3_D, SEL_IIC3_3),
863 PINMUX_IPSR_MODSEL_DATA(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3), 862 PINMUX_IPSR_MSEL(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3),
864 PINMUX_IPSR_DATA(IP1_16_14, A13), 863 PINMUX_IPSR_DATA(IP1_16_14, A13),
865 PINMUX_IPSR_MODSEL_DATA(IP1_16_14, ATAG0_N_C, SEL_LBS_2), 864 PINMUX_IPSR_MSEL(IP1_16_14, ATAG0_N_C, SEL_LBS_2),
866 PINMUX_IPSR_MODSEL_DATA(IP1_16_14, BPFCLK, SEL_FM_0), 865 PINMUX_IPSR_MSEL(IP1_16_14, BPFCLK, SEL_FM_0),
867 PINMUX_IPSR_MODSEL_DATA(IP1_16_14, MSIOF1_SS1_D, SEL_SOF1_3), 866 PINMUX_IPSR_MSEL(IP1_16_14, MSIOF1_SS1_D, SEL_SOF1_3),
868 PINMUX_IPSR_DATA(IP1_19_17, A14), 867 PINMUX_IPSR_DATA(IP1_19_17, A14),
869 PINMUX_IPSR_MODSEL_DATA(IP1_19_17, ATADIR0_N_C, SEL_LBS_2), 868 PINMUX_IPSR_MSEL(IP1_19_17, ATADIR0_N_C, SEL_LBS_2),
870 PINMUX_IPSR_MODSEL_DATA(IP1_19_17, FMIN, SEL_FM_0), 869 PINMUX_IPSR_MSEL(IP1_19_17, FMIN, SEL_FM_0),
871 PINMUX_IPSR_MODSEL_DATA(IP1_19_17, FMIN_C, SEL_FM_2), 870 PINMUX_IPSR_MSEL(IP1_19_17, FMIN_C, SEL_FM_2),
872 PINMUX_IPSR_MODSEL_DATA(IP1_19_17, MSIOF1_SYNC_D, SEL_SOF1_3), 871 PINMUX_IPSR_MSEL(IP1_19_17, MSIOF1_SYNC_D, SEL_SOF1_3),
873 PINMUX_IPSR_DATA(IP1_22_20, A15), 872 PINMUX_IPSR_DATA(IP1_22_20, A15),
874 PINMUX_IPSR_MODSEL_DATA(IP1_22_20, BPFCLK_C, SEL_FM_2), 873 PINMUX_IPSR_MSEL(IP1_22_20, BPFCLK_C, SEL_FM_2),
875 PINMUX_IPSR_DATA(IP1_25_23, A16), 874 PINMUX_IPSR_DATA(IP1_25_23, A16),
876 PINMUX_IPSR_MODSEL_DATA(IP1_25_23, DREQ2_B, SEL_LBS_1), 875 PINMUX_IPSR_MSEL(IP1_25_23, DREQ2_B, SEL_LBS_1),
877 PINMUX_IPSR_MODSEL_DATA(IP1_25_23, FMCLK_C, SEL_FM_2), 876 PINMUX_IPSR_MSEL(IP1_25_23, FMCLK_C, SEL_FM_2),
878 PINMUX_IPSR_MODSEL_DATA(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1), 877 PINMUX_IPSR_MSEL(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1),
879 PINMUX_IPSR_DATA(IP1_28_26, A17), 878 PINMUX_IPSR_DATA(IP1_28_26, A17),
880 PINMUX_IPSR_MODSEL_DATA(IP1_28_26, DACK2_B, SEL_LBS_1), 879 PINMUX_IPSR_MSEL(IP1_28_26, DACK2_B, SEL_LBS_1),
881 PINMUX_IPSR_MODSEL_DATA(IP1_28_26, SDA0_C, SEL_IIC0_2), 880 PINMUX_IPSR_MSEL(IP1_28_26, SDA0_C, SEL_IIC0_2),
882 PINMUX_IPSR_DATA(IP1_31_29, A18), 881 PINMUX_IPSR_DATA(IP1_31_29, A18),
883 PINMUX_IPSR_MODSEL_DATA(IP1_31_29, DREQ1, SEL_LBS_0), 882 PINMUX_IPSR_MSEL(IP1_31_29, DREQ1, SEL_LBS_0),
884 PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2), 883 PINMUX_IPSR_MSEL(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2),
885 PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SCIFB1_RXD_C, SEL_SCIFB1_2), 884 PINMUX_IPSR_MSEL(IP1_31_29, SCIFB1_RXD_C, SEL_SCIFB1_2),
886 885
887 /* IPSR2 */ 886 /* IPSR2 */
888 PINMUX_IPSR_DATA(IP2_2_0, A19), 887 PINMUX_IPSR_DATA(IP2_2_0, A19),
889 PINMUX_IPSR_DATA(IP2_2_0, DACK1), 888 PINMUX_IPSR_DATA(IP2_2_0, DACK1),
890 PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFA1_TXD_C, SEL_SCIFA1_2), 889 PINMUX_IPSR_MSEL(IP2_2_0, SCIFA1_TXD_C, SEL_SCIFA1_2),
891 PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFB1_TXD_C, SEL_SCIFB1_2), 890 PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_TXD_C, SEL_SCIFB1_2),
892 PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SCIFB1_SCK_B, SEL_SCIFB1_0), 891 PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_SCK_B, SEL_SCIFB1_1),
893 PINMUX_IPSR_DATA(IP2_2_0, A20), 892 PINMUX_IPSR_DATA(IP2_2_0, A20),
894 PINMUX_IPSR_MODSEL_DATA(IP2_4_3, SPCLK, SEL_QSP_0), 893 PINMUX_IPSR_MSEL(IP2_4_3, SPCLK, SEL_QSP_0),
895 PINMUX_IPSR_DATA(IP2_6_5, A21), 894 PINMUX_IPSR_DATA(IP2_6_5, A21),
896 PINMUX_IPSR_MODSEL_DATA(IP2_6_5, ATAWR0_N_B, SEL_LBS_1), 895 PINMUX_IPSR_MSEL(IP2_6_5, ATAWR0_N_B, SEL_LBS_1),
897 PINMUX_IPSR_MODSEL_DATA(IP2_6_5, MOSI_IO0, SEL_QSP_0), 896 PINMUX_IPSR_MSEL(IP2_6_5, MOSI_IO0, SEL_QSP_0),
898 PINMUX_IPSR_DATA(IP2_9_7, A22), 897 PINMUX_IPSR_DATA(IP2_9_7, A22),
899 PINMUX_IPSR_MODSEL_DATA(IP2_9_7, MISO_IO1, SEL_QSP_0), 898 PINMUX_IPSR_MSEL(IP2_9_7, MISO_IO1, SEL_QSP_0),
900 PINMUX_IPSR_MODSEL_DATA(IP2_9_7, FMCLK_B, SEL_FM_1), 899 PINMUX_IPSR_MSEL(IP2_9_7, FMCLK_B, SEL_FM_1),
901 PINMUX_IPSR_MODSEL_DATA(IP2_9_7, TX0, SEL_SCIF0_0), 900 PINMUX_IPSR_MSEL(IP2_9_7, TX0, SEL_SCIF0_0),
902 PINMUX_IPSR_MODSEL_DATA(IP2_9_7, SCIFA0_TXD, SEL_SCFA_0), 901 PINMUX_IPSR_MSEL(IP2_9_7, SCIFA0_TXD, SEL_SCFA_0),
903 PINMUX_IPSR_DATA(IP2_12_10, A23), 902 PINMUX_IPSR_DATA(IP2_12_10, A23),
904 PINMUX_IPSR_MODSEL_DATA(IP2_12_10, IO2, SEL_QSP_0), 903 PINMUX_IPSR_MSEL(IP2_12_10, IO2, SEL_QSP_0),
905 PINMUX_IPSR_MODSEL_DATA(IP2_12_10, BPFCLK_B, SEL_FM_1), 904 PINMUX_IPSR_MSEL(IP2_12_10, BPFCLK_B, SEL_FM_1),
906 PINMUX_IPSR_MODSEL_DATA(IP2_12_10, RX0, SEL_SCIF0_0), 905 PINMUX_IPSR_MSEL(IP2_12_10, RX0, SEL_SCIF0_0),
907 PINMUX_IPSR_MODSEL_DATA(IP2_12_10, SCIFA0_RXD, SEL_SCFA_0), 906 PINMUX_IPSR_MSEL(IP2_12_10, SCIFA0_RXD, SEL_SCFA_0),
908 PINMUX_IPSR_DATA(IP2_15_13, A24), 907 PINMUX_IPSR_DATA(IP2_15_13, A24),
909 PINMUX_IPSR_MODSEL_DATA(IP2_15_13, DREQ2, SEL_LBS_0), 908 PINMUX_IPSR_MSEL(IP2_15_13, DREQ2, SEL_LBS_0),
910 PINMUX_IPSR_MODSEL_DATA(IP2_15_13, IO3, SEL_QSP_0), 909 PINMUX_IPSR_MSEL(IP2_15_13, IO3, SEL_QSP_0),
911 PINMUX_IPSR_MODSEL_DATA(IP2_15_13, TX1, SEL_SCIF1_0), 910 PINMUX_IPSR_MSEL(IP2_15_13, TX1, SEL_SCIF1_0),
912 PINMUX_IPSR_MODSEL_DATA(IP2_15_13, SCIFA1_TXD, SEL_SCIFA1_0), 911 PINMUX_IPSR_MSEL(IP2_15_13, SCIFA1_TXD, SEL_SCIFA1_0),
913 PINMUX_IPSR_DATA(IP2_18_16, A25), 912 PINMUX_IPSR_DATA(IP2_18_16, A25),
914 PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DACK2, SEL_LBS_0), 913 PINMUX_IPSR_MSEL(IP2_18_16, DACK2, SEL_LBS_0),
915 PINMUX_IPSR_MODSEL_DATA(IP2_18_16, SSL, SEL_QSP_0), 914 PINMUX_IPSR_MSEL(IP2_18_16, SSL, SEL_QSP_0),
916 PINMUX_IPSR_MODSEL_DATA(IP2_18_16, DREQ1_C, SEL_LBS_2), 915 PINMUX_IPSR_MSEL(IP2_18_16, DREQ1_C, SEL_LBS_2),
917 PINMUX_IPSR_MODSEL_DATA(IP2_18_16, RX1, SEL_SCIF1_0), 916 PINMUX_IPSR_MSEL(IP2_18_16, RX1, SEL_SCIF1_0),
918 PINMUX_IPSR_MODSEL_DATA(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0), 917 PINMUX_IPSR_MSEL(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0),
919 PINMUX_IPSR_DATA(IP2_20_19, CS0_N), 918 PINMUX_IPSR_DATA(IP2_20_19, CS0_N),
920 PINMUX_IPSR_MODSEL_DATA(IP2_20_19, ATAG0_N_B, SEL_LBS_1), 919 PINMUX_IPSR_MSEL(IP2_20_19, ATAG0_N_B, SEL_LBS_1),
921 PINMUX_IPSR_MODSEL_DATA(IP2_20_19, SCL1, SEL_IIC1_0), 920 PINMUX_IPSR_MSEL(IP2_20_19, SCL1, SEL_IIC1_0),
922 PINMUX_IPSR_DATA(IP2_22_21, CS1_N_A26), 921 PINMUX_IPSR_DATA(IP2_22_21, CS1_N_A26),
923 PINMUX_IPSR_MODSEL_DATA(IP2_22_21, ATADIR0_N_B, SEL_LBS_1), 922 PINMUX_IPSR_MSEL(IP2_22_21, ATADIR0_N_B, SEL_LBS_1),
924 PINMUX_IPSR_MODSEL_DATA(IP2_22_21, SDA1, SEL_IIC1_0), 923 PINMUX_IPSR_MSEL(IP2_22_21, SDA1, SEL_IIC1_0),
925 PINMUX_IPSR_DATA(IP2_24_23, EX_CS1_N), 924 PINMUX_IPSR_DATA(IP2_24_23, EX_CS1_N),
926 PINMUX_IPSR_MODSEL_DATA(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0), 925 PINMUX_IPSR_MSEL(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0),
927 PINMUX_IPSR_DATA(IP2_26_25, EX_CS2_N), 926 PINMUX_IPSR_DATA(IP2_26_25, EX_CS2_N),
928 PINMUX_IPSR_MODSEL_DATA(IP2_26_25, ATAWR0_N, SEL_LBS_0), 927 PINMUX_IPSR_MSEL(IP2_26_25, ATAWR0_N, SEL_LBS_0),
929 PINMUX_IPSR_MODSEL_DATA(IP2_26_25, MSIOF2_SYNC, SEL_SOF2_0), 928 PINMUX_IPSR_MSEL(IP2_26_25, MSIOF2_SYNC, SEL_SOF2_0),
930 PINMUX_IPSR_DATA(IP2_29_27, EX_CS3_N), 929 PINMUX_IPSR_DATA(IP2_29_27, EX_CS3_N),
931 PINMUX_IPSR_MODSEL_DATA(IP2_29_27, ATADIR0_N, SEL_LBS_0), 930 PINMUX_IPSR_MSEL(IP2_29_27, ATADIR0_N, SEL_LBS_0),
932 PINMUX_IPSR_MODSEL_DATA(IP2_29_27, MSIOF2_TXD, SEL_SOF2_0), 931 PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_TXD, SEL_SOF2_0),
933 PINMUX_IPSR_MODSEL_DATA(IP2_29_27, ATAG0_N, SEL_LBS_0), 932 PINMUX_IPSR_MSEL(IP2_29_27, ATAG0_N, SEL_LBS_0),
934 PINMUX_IPSR_DATA(IP2_29_27, EX_WAIT1), 933 PINMUX_IPSR_DATA(IP2_29_27, EX_WAIT1),
935 934
936 /* IPSR3 */ 935 /* IPSR3 */
937 PINMUX_IPSR_DATA(IP3_2_0, EX_CS4_N), 936 PINMUX_IPSR_DATA(IP3_2_0, EX_CS4_N),
938 PINMUX_IPSR_MODSEL_DATA(IP3_2_0, ATARD0_N, SEL_LBS_0), 937 PINMUX_IPSR_MSEL(IP3_2_0, ATARD0_N, SEL_LBS_0),
939 PINMUX_IPSR_MODSEL_DATA(IP3_2_0, MSIOF2_RXD, SEL_SOF2_0), 938 PINMUX_IPSR_MSEL(IP3_2_0, MSIOF2_RXD, SEL_SOF2_0),
940 PINMUX_IPSR_DATA(IP3_2_0, EX_WAIT2), 939 PINMUX_IPSR_DATA(IP3_2_0, EX_WAIT2),
941 PINMUX_IPSR_DATA(IP3_5_3, EX_CS5_N), 940 PINMUX_IPSR_DATA(IP3_5_3, EX_CS5_N),
942 PINMUX_IPSR_DATA(IP3_5_3, ATACS00_N), 941 PINMUX_IPSR_DATA(IP3_5_3, ATACS00_N),
943 PINMUX_IPSR_MODSEL_DATA(IP3_5_3, MSIOF2_SS1, SEL_SOF2_0), 942 PINMUX_IPSR_MSEL(IP3_5_3, MSIOF2_SS1, SEL_SOF2_0),
944 PINMUX_IPSR_MODSEL_DATA(IP3_5_3, HRX1_B, SEL_HSCIF1_1), 943 PINMUX_IPSR_MSEL(IP3_5_3, HRX1_B, SEL_HSCIF1_1),
945 PINMUX_IPSR_MODSEL_DATA(IP3_5_3, SCIFB1_RXD_B, SEL_SCIFB1_1), 944 PINMUX_IPSR_MSEL(IP3_5_3, SCIFB1_RXD_B, SEL_SCIFB1_1),
946 PINMUX_IPSR_DATA(IP3_5_3, PWM1), 945 PINMUX_IPSR_DATA(IP3_5_3, PWM1),
947 PINMUX_IPSR_DATA(IP3_5_3, TPU_TO1), 946 PINMUX_IPSR_DATA(IP3_5_3, TPU_TO1),
948 PINMUX_IPSR_DATA(IP3_8_6, BS_N), 947 PINMUX_IPSR_DATA(IP3_8_6, BS_N),
949 PINMUX_IPSR_DATA(IP3_8_6, ATACS10_N), 948 PINMUX_IPSR_DATA(IP3_8_6, ATACS10_N),
950 PINMUX_IPSR_MODSEL_DATA(IP3_8_6, MSIOF2_SS2, SEL_SOF2_0), 949 PINMUX_IPSR_MSEL(IP3_8_6, MSIOF2_SS2, SEL_SOF2_0),
951 PINMUX_IPSR_MODSEL_DATA(IP3_8_6, HTX1_B, SEL_HSCIF1_1), 950 PINMUX_IPSR_MSEL(IP3_8_6, HTX1_B, SEL_HSCIF1_1),
952 PINMUX_IPSR_MODSEL_DATA(IP3_8_6, SCIFB1_TXD_B, SEL_SCIFB1_1), 951 PINMUX_IPSR_MSEL(IP3_8_6, SCIFB1_TXD_B, SEL_SCIFB1_1),
953 PINMUX_IPSR_DATA(IP3_8_6, PWM2), 952 PINMUX_IPSR_DATA(IP3_8_6, PWM2),
954 PINMUX_IPSR_DATA(IP3_8_6, TPU_TO2), 953 PINMUX_IPSR_DATA(IP3_8_6, TPU_TO2),
955 PINMUX_IPSR_DATA(IP3_11_9, RD_WR_N), 954 PINMUX_IPSR_DATA(IP3_11_9, RD_WR_N),
956 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, HRX2_B, SEL_HSCIF2_1), 955 PINMUX_IPSR_MSEL(IP3_11_9, HRX2_B, SEL_HSCIF2_1),
957 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, FMIN_B, SEL_FM_1), 956 PINMUX_IPSR_MSEL(IP3_11_9, FMIN_B, SEL_FM_1),
958 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SCIFB0_RXD_B, SEL_SCIFB_1), 957 PINMUX_IPSR_MSEL(IP3_11_9, SCIFB0_RXD_B, SEL_SCIFB_1),
959 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, DREQ1_D, SEL_LBS_1), 958 PINMUX_IPSR_MSEL(IP3_11_9, DREQ1_D, SEL_LBS_1),
960 PINMUX_IPSR_DATA(IP3_13_12, WE0_N), 959 PINMUX_IPSR_DATA(IP3_13_12, WE0_N),
961 PINMUX_IPSR_MODSEL_DATA(IP3_13_12, HCTS2_N_B, SEL_HSCIF2_1), 960 PINMUX_IPSR_MSEL(IP3_13_12, HCTS2_N_B, SEL_HSCIF2_1),
962 PINMUX_IPSR_MODSEL_DATA(IP3_13_12, SCIFB0_TXD_B, SEL_SCIFB_1), 961 PINMUX_IPSR_MSEL(IP3_13_12, SCIFB0_TXD_B, SEL_SCIFB_1),
963 PINMUX_IPSR_DATA(IP3_15_14, WE1_N), 962 PINMUX_IPSR_DATA(IP3_15_14, WE1_N),
964 PINMUX_IPSR_MODSEL_DATA(IP3_15_14, ATARD0_N_B, SEL_LBS_1), 963 PINMUX_IPSR_MSEL(IP3_15_14, ATARD0_N_B, SEL_LBS_1),
965 PINMUX_IPSR_MODSEL_DATA(IP3_15_14, HTX2_B, SEL_HSCIF2_1), 964 PINMUX_IPSR_MSEL(IP3_15_14, HTX2_B, SEL_HSCIF2_1),
966 PINMUX_IPSR_MODSEL_DATA(IP3_15_14, SCIFB0_RTS_N_B, SEL_SCIFB_1), 965 PINMUX_IPSR_MSEL(IP3_15_14, SCIFB0_RTS_N_B, SEL_SCIFB_1),
967 PINMUX_IPSR_DATA(IP3_17_16, EX_WAIT0), 966 PINMUX_IPSR_DATA(IP3_17_16, EX_WAIT0),
968 PINMUX_IPSR_MODSEL_DATA(IP3_17_16, HRTS2_N_B, SEL_HSCIF2_1), 967 PINMUX_IPSR_MSEL(IP3_17_16, HRTS2_N_B, SEL_HSCIF2_1),
969 PINMUX_IPSR_MODSEL_DATA(IP3_17_16, SCIFB0_CTS_N_B, SEL_SCIFB_1), 968 PINMUX_IPSR_MSEL(IP3_17_16, SCIFB0_CTS_N_B, SEL_SCIFB_1),
970 PINMUX_IPSR_DATA(IP3_19_18, DREQ0), 969 PINMUX_IPSR_DATA(IP3_19_18, DREQ0),
971 PINMUX_IPSR_DATA(IP3_19_18, PWM3), 970 PINMUX_IPSR_DATA(IP3_19_18, PWM3),
972 PINMUX_IPSR_DATA(IP3_19_18, TPU_TO3), 971 PINMUX_IPSR_DATA(IP3_19_18, TPU_TO3),
973 PINMUX_IPSR_DATA(IP3_21_20, DACK0), 972 PINMUX_IPSR_DATA(IP3_21_20, DACK0),
974 PINMUX_IPSR_DATA(IP3_21_20, DRACK0), 973 PINMUX_IPSR_DATA(IP3_21_20, DRACK0),
975 PINMUX_IPSR_MODSEL_DATA(IP3_21_20, REMOCON, SEL_RCN_0), 974 PINMUX_IPSR_MSEL(IP3_21_20, REMOCON, SEL_RCN_0),
976 PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SPEEDIN, SEL_RSP_0), 975 PINMUX_IPSR_MSEL(IP3_24_22, SPEEDIN, SEL_RSP_0),
977 PINMUX_IPSR_MODSEL_DATA(IP3_24_22, HSCK0_C, SEL_HSCIF0_2), 976 PINMUX_IPSR_MSEL(IP3_24_22, HSCK0_C, SEL_HSCIF0_2),
978 PINMUX_IPSR_MODSEL_DATA(IP3_24_22, HSCK2_C, SEL_HSCIF2_2), 977 PINMUX_IPSR_MSEL(IP3_24_22, HSCK2_C, SEL_HSCIF2_2),
979 PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SCIFB0_SCK_B, SEL_SCIFB_1), 978 PINMUX_IPSR_MSEL(IP3_24_22, SCIFB0_SCK_B, SEL_SCIFB_1),
980 PINMUX_IPSR_MODSEL_DATA(IP3_24_22, SCIFB2_SCK_B, SEL_SCIFB2_1), 979 PINMUX_IPSR_MSEL(IP3_24_22, SCIFB2_SCK_B, SEL_SCIFB2_1),
981 PINMUX_IPSR_MODSEL_DATA(IP3_24_22, DREQ2_C, SEL_LBS_2), 980 PINMUX_IPSR_MSEL(IP3_24_22, DREQ2_C, SEL_LBS_2),
982 PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX2_C, SEL_HSCIF2_2), 981 PINMUX_IPSR_MSEL(IP3_30_28, HTX2_C, SEL_HSCIF2_2),
983 PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SSI_SCK0129, SEL_SSI0_0), 982 PINMUX_IPSR_MSEL(IP3_27_25, SSI_SCK0129, SEL_SSI0_0),
984 PINMUX_IPSR_MODSEL_DATA(IP3_27_25, HRX0_C, SEL_HSCIF0_2), 983 PINMUX_IPSR_MSEL(IP3_27_25, HRX0_C, SEL_HSCIF0_2),
985 PINMUX_IPSR_MODSEL_DATA(IP3_27_25, HRX2_C, SEL_HSCIF2_2), 984 PINMUX_IPSR_MSEL(IP3_27_25, HRX2_C, SEL_HSCIF2_2),
986 PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SCIFB0_RXD_C, SEL_SCIFB_2), 985 PINMUX_IPSR_MSEL(IP3_27_25, SCIFB0_RXD_C, SEL_SCIFB_2),
987 PINMUX_IPSR_MODSEL_DATA(IP3_27_25, SCIFB2_RXD_C, SEL_SCIFB2_2), 986 PINMUX_IPSR_MSEL(IP3_27_25, SCIFB2_RXD_C, SEL_SCIFB2_2),
988 PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SSI_WS0129, SEL_SSI0_0), 987 PINMUX_IPSR_MSEL(IP3_30_28, SSI_WS0129, SEL_SSI0_0),
989 PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX0_C, SEL_HSCIF0_2), 988 PINMUX_IPSR_MSEL(IP3_30_28, HTX0_C, SEL_HSCIF0_2),
990 PINMUX_IPSR_MODSEL_DATA(IP3_30_28, HTX2_C, SEL_HSCIF2_2), 989 PINMUX_IPSR_MSEL(IP3_30_28, HTX2_C, SEL_HSCIF2_2),
991 PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SCIFB0_TXD_C, SEL_SCIFB_2), 990 PINMUX_IPSR_MSEL(IP3_30_28, SCIFB0_TXD_C, SEL_SCIFB_2),
992 PINMUX_IPSR_MODSEL_DATA(IP3_30_28, SCIFB2_TXD_C, SEL_SCIFB2_2), 991 PINMUX_IPSR_MSEL(IP3_30_28, SCIFB2_TXD_C, SEL_SCIFB2_2),
993 992
994 /* IPSR4 */ 993 /* IPSR4 */
995 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SSI_SDATA0, SEL_SSI0_0), 994 PINMUX_IPSR_MSEL(IP4_1_0, SSI_SDATA0, SEL_SSI0_0),
996 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCL0_B, SEL_IIC0_1), 995 PINMUX_IPSR_MSEL(IP4_1_0, SCL0_B, SEL_IIC0_1),
997 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCL7_B, SEL_IIC7_1), 996 PINMUX_IPSR_MSEL(IP4_1_0, SCL7_B, SEL_IIC7_1),
998 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, MSIOF2_SCK_C, SEL_SOF2_2), 997 PINMUX_IPSR_MSEL(IP4_1_0, MSIOF2_SCK_C, SEL_SOF2_2),
999 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SSI_SCK1, SEL_SSI1_0), 998 PINMUX_IPSR_MSEL(IP4_4_2, SSI_SCK1, SEL_SSI1_0),
1000 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SDA0_B, SEL_IIC0_1), 999 PINMUX_IPSR_MSEL(IP4_4_2, SDA0_B, SEL_IIC0_1),
1001 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SDA7_B, SEL_IIC7_1), 1000 PINMUX_IPSR_MSEL(IP4_4_2, SDA7_B, SEL_IIC7_1),
1002 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, MSIOF2_SYNC_C, SEL_SOF2_2), 1001 PINMUX_IPSR_MSEL(IP4_4_2, MSIOF2_SYNC_C, SEL_SOF2_2),
1003 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, GLO_I0_D, SEL_GPS_3), 1002 PINMUX_IPSR_MSEL(IP4_4_2, GLO_I0_D, SEL_GPS_3),
1004 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SSI_WS1, SEL_SSI1_0), 1003 PINMUX_IPSR_MSEL(IP4_7_5, SSI_WS1, SEL_SSI1_0),
1005 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCL1_B, SEL_IIC1_1), 1004 PINMUX_IPSR_MSEL(IP4_7_5, SCL1_B, SEL_IIC1_1),
1006 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCL8_B, SEL_IIC8_1), 1005 PINMUX_IPSR_MSEL(IP4_7_5, SCL8_B, SEL_IIC8_1),
1007 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, MSIOF2_TXD_C, SEL_SOF2_2), 1006 PINMUX_IPSR_MSEL(IP4_7_5, MSIOF2_TXD_C, SEL_SOF2_2),
1008 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, GLO_I1_D, SEL_GPS_3), 1007 PINMUX_IPSR_MSEL(IP4_7_5, GLO_I1_D, SEL_GPS_3),
1009 PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SSI_SDATA1, SEL_SSI1_0), 1008 PINMUX_IPSR_MSEL(IP4_9_8, SSI_SDATA1, SEL_SSI1_0),
1010 PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SDA1_B, SEL_IIC1_1), 1009 PINMUX_IPSR_MSEL(IP4_9_8, SDA1_B, SEL_IIC1_1),
1011 PINMUX_IPSR_MODSEL_DATA(IP4_9_8, SDA8_B, SEL_IIC8_1), 1010 PINMUX_IPSR_MSEL(IP4_9_8, SDA8_B, SEL_IIC8_1),
1012 PINMUX_IPSR_MODSEL_DATA(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2), 1011 PINMUX_IPSR_MSEL(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2),
1013 PINMUX_IPSR_DATA(IP4_12_10, SSI_SCK2), 1012 PINMUX_IPSR_DATA(IP4_12_10, SSI_SCK2),
1014 PINMUX_IPSR_MODSEL_DATA(IP4_12_10, SCL2, SEL_IIC2_0), 1013 PINMUX_IPSR_MSEL(IP4_12_10, SCL2, SEL_IIC2_0),
1015 PINMUX_IPSR_MODSEL_DATA(IP4_12_10, GPS_CLK_B, SEL_GPS_1), 1014 PINMUX_IPSR_MSEL(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
1016 PINMUX_IPSR_MODSEL_DATA(IP4_12_10, GLO_Q0_D, SEL_GPS_3), 1015 PINMUX_IPSR_MSEL(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
1017 PINMUX_IPSR_DATA(IP4_15_13, SSI_WS2), 1016 PINMUX_IPSR_DATA(IP4_15_13, SSI_WS2),
1018 PINMUX_IPSR_MODSEL_DATA(IP4_15_13, SDA2, SEL_IIC2_0), 1017 PINMUX_IPSR_MSEL(IP4_15_13, SDA2, SEL_IIC2_0),
1019 PINMUX_IPSR_MODSEL_DATA(IP4_15_13, GPS_SIGN_B, SEL_GPS_1), 1018 PINMUX_IPSR_MSEL(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
1020 PINMUX_IPSR_MODSEL_DATA(IP4_15_13, RX2_E, SEL_SCIF2_4), 1019 PINMUX_IPSR_MSEL(IP4_15_13, RX2_E, SEL_SCIF2_4),
1021 PINMUX_IPSR_MODSEL_DATA(IP4_15_13, GLO_Q1_D, SEL_GPS_3), 1020 PINMUX_IPSR_MSEL(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
1022 PINMUX_IPSR_DATA(IP4_18_16, SSI_SDATA2), 1021 PINMUX_IPSR_DATA(IP4_18_16, SSI_SDATA2),
1023 PINMUX_IPSR_MODSEL_DATA(IP4_18_16, GPS_MAG_B, SEL_GPS_1), 1022 PINMUX_IPSR_MSEL(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
1024 PINMUX_IPSR_MODSEL_DATA(IP4_18_16, TX2_E, SEL_SCIF2_4), 1023 PINMUX_IPSR_MSEL(IP4_18_16, TX2_E, SEL_SCIF2_4),
1025 PINMUX_IPSR_DATA(IP4_19, SSI_SCK34), 1024 PINMUX_IPSR_DATA(IP4_19, SSI_SCK34),
1026 PINMUX_IPSR_DATA(IP4_20, SSI_WS34), 1025 PINMUX_IPSR_DATA(IP4_20, SSI_WS34),
1027 PINMUX_IPSR_DATA(IP4_21, SSI_SDATA3), 1026 PINMUX_IPSR_DATA(IP4_21, SSI_SDATA3),
1028 PINMUX_IPSR_DATA(IP4_23_22, SSI_SCK4), 1027 PINMUX_IPSR_DATA(IP4_23_22, SSI_SCK4),
1029 PINMUX_IPSR_MODSEL_DATA(IP4_23_22, GLO_SS_D, SEL_GPS_3), 1028 PINMUX_IPSR_MSEL(IP4_23_22, GLO_SS_D, SEL_GPS_3),
1030 PINMUX_IPSR_DATA(IP4_25_24, SSI_WS4), 1029 PINMUX_IPSR_DATA(IP4_25_24, SSI_WS4),
1031 PINMUX_IPSR_MODSEL_DATA(IP4_25_24, GLO_RFON_D, SEL_GPS_3), 1030 PINMUX_IPSR_MSEL(IP4_25_24, GLO_RFON_D, SEL_GPS_3),
1032 PINMUX_IPSR_DATA(IP4_27_26, SSI_SDATA4), 1031 PINMUX_IPSR_DATA(IP4_27_26, SSI_SDATA4),
1033 PINMUX_IPSR_MODSEL_DATA(IP4_27_26, MSIOF2_SCK_D, SEL_SOF2_3), 1032 PINMUX_IPSR_MSEL(IP4_27_26, MSIOF2_SCK_D, SEL_SOF2_3),
1034 PINMUX_IPSR_DATA(IP4_30_28, SSI_SCK5), 1033 PINMUX_IPSR_DATA(IP4_30_28, SSI_SCK5),
1035 PINMUX_IPSR_MODSEL_DATA(IP4_30_28, MSIOF1_SCK_C, SEL_SOF1_2), 1034 PINMUX_IPSR_MSEL(IP4_30_28, MSIOF1_SCK_C, SEL_SOF1_2),
1036 PINMUX_IPSR_MODSEL_DATA(IP4_30_28, TS_SDATA0, SEL_TSIF0_0), 1035 PINMUX_IPSR_MSEL(IP4_30_28, TS_SDATA0, SEL_TSIF0_0),
1037 PINMUX_IPSR_MODSEL_DATA(IP4_30_28, GLO_I0, SEL_GPS_0), 1036 PINMUX_IPSR_MSEL(IP4_30_28, GLO_I0, SEL_GPS_0),
1038 PINMUX_IPSR_MODSEL_DATA(IP4_30_28, MSIOF2_SYNC_D, SEL_SOF2_3), 1037 PINMUX_IPSR_MSEL(IP4_30_28, MSIOF2_SYNC_D, SEL_SOF2_3),
1039 PINMUX_IPSR_DATA(IP4_30_28, VI1_R2_B), 1038 PINMUX_IPSR_DATA(IP4_30_28, VI1_R2_B),
1040 1039
1041 /* IPSR5 */ 1040 /* IPSR5 */
1042 PINMUX_IPSR_DATA(IP5_2_0, SSI_WS5), 1041 PINMUX_IPSR_DATA(IP5_2_0, SSI_WS5),
1043 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, MSIOF1_SYNC_C, SEL_SOF1_2), 1042 PINMUX_IPSR_MSEL(IP5_2_0, MSIOF1_SYNC_C, SEL_SOF1_2),
1044 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, TS_SCK0, SEL_TSIF0_0), 1043 PINMUX_IPSR_MSEL(IP5_2_0, TS_SCK0, SEL_TSIF0_0),
1045 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, GLO_I1, SEL_GPS_0), 1044 PINMUX_IPSR_MSEL(IP5_2_0, GLO_I1, SEL_GPS_0),
1046 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, MSIOF2_TXD_D, SEL_SOF2_3), 1045 PINMUX_IPSR_MSEL(IP5_2_0, MSIOF2_TXD_D, SEL_SOF2_3),
1047 PINMUX_IPSR_DATA(IP5_2_0, VI1_R3_B), 1046 PINMUX_IPSR_DATA(IP5_2_0, VI1_R3_B),
1048 PINMUX_IPSR_DATA(IP5_5_3, SSI_SDATA5), 1047 PINMUX_IPSR_DATA(IP5_5_3, SSI_SDATA5),
1049 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF1_TXD_C, SEL_SOF1_2), 1048 PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_TXD_C, SEL_SOF1_2),
1050 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, TS_SDEN0, SEL_TSIF0_0), 1049 PINMUX_IPSR_MSEL(IP5_5_3, TS_SDEN0, SEL_TSIF0_0),
1051 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, GLO_Q0, SEL_GPS_0), 1050 PINMUX_IPSR_MSEL(IP5_5_3, GLO_Q0, SEL_GPS_0),
1052 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF2_SS1_D, SEL_SOF2_3), 1051 PINMUX_IPSR_MSEL(IP5_5_3, MSIOF2_SS1_D, SEL_SOF2_3),
1053 PINMUX_IPSR_DATA(IP5_5_3, VI1_R4_B), 1052 PINMUX_IPSR_DATA(IP5_5_3, VI1_R4_B),
1054 PINMUX_IPSR_DATA(IP5_8_6, SSI_SCK6), 1053 PINMUX_IPSR_DATA(IP5_8_6, SSI_SCK6),
1055 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, MSIOF1_RXD_C, SEL_SOF1_2), 1054 PINMUX_IPSR_MSEL(IP5_8_6, MSIOF1_RXD_C, SEL_SOF1_2),
1056 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, TS_SPSYNC0, SEL_TSIF0_0), 1055 PINMUX_IPSR_MSEL(IP5_8_6, TS_SPSYNC0, SEL_TSIF0_0),
1057 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, GLO_Q1, SEL_GPS_0), 1056 PINMUX_IPSR_MSEL(IP5_8_6, GLO_Q1, SEL_GPS_0),
1058 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, MSIOF2_RXD_D, SEL_SOF2_3), 1057 PINMUX_IPSR_MSEL(IP5_8_6, MSIOF2_RXD_D, SEL_SOF2_3),
1059 PINMUX_IPSR_DATA(IP5_8_6, VI1_R5_B), 1058 PINMUX_IPSR_DATA(IP5_8_6, VI1_R5_B),
1060 PINMUX_IPSR_DATA(IP5_11_9, SSI_WS6), 1059 PINMUX_IPSR_DATA(IP5_11_9, SSI_WS6),
1061 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, GLO_SCLK, SEL_GPS_0), 1060 PINMUX_IPSR_MSEL(IP5_11_9, GLO_SCLK, SEL_GPS_0),
1062 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, MSIOF2_SS2_D, SEL_SOF2_3), 1061 PINMUX_IPSR_MSEL(IP5_11_9, MSIOF2_SS2_D, SEL_SOF2_3),
1063 PINMUX_IPSR_DATA(IP5_11_9, VI1_R6_B), 1062 PINMUX_IPSR_DATA(IP5_11_9, VI1_R6_B),
1064 PINMUX_IPSR_DATA(IP5_14_12, SSI_SDATA6), 1063 PINMUX_IPSR_DATA(IP5_14_12, SSI_SDATA6),
1065 PINMUX_IPSR_MODSEL_DATA(IP5_14_12, STP_IVCXO27_0_B, SEL_SSP_1), 1064 PINMUX_IPSR_MSEL(IP5_14_12, STP_IVCXO27_0_B, SEL_SSP_1),
1066 PINMUX_IPSR_MODSEL_DATA(IP5_14_12, GLO_SDATA, SEL_GPS_0), 1065 PINMUX_IPSR_MSEL(IP5_14_12, GLO_SDATA, SEL_GPS_0),
1067 PINMUX_IPSR_DATA(IP5_14_12, VI1_R7_B), 1066 PINMUX_IPSR_DATA(IP5_14_12, VI1_R7_B),
1068 PINMUX_IPSR_MODSEL_DATA(IP5_16_15, SSI_SCK78, SEL_SSI7_0), 1067 PINMUX_IPSR_MSEL(IP5_16_15, SSI_SCK78, SEL_SSI7_0),
1069 PINMUX_IPSR_MODSEL_DATA(IP5_16_15, STP_ISCLK_0_B, SEL_SSP_1), 1068 PINMUX_IPSR_MSEL(IP5_16_15, STP_ISCLK_0_B, SEL_SSP_1),
1070 PINMUX_IPSR_MODSEL_DATA(IP5_16_15, GLO_SS, SEL_GPS_0), 1069 PINMUX_IPSR_MSEL(IP5_16_15, GLO_SS, SEL_GPS_0),
1071 PINMUX_IPSR_MODSEL_DATA(IP5_19_17, SSI_WS78, SEL_SSI7_0), 1070 PINMUX_IPSR_MSEL(IP5_19_17, SSI_WS78, SEL_SSI7_0),
1072 PINMUX_IPSR_MODSEL_DATA(IP5_19_17, TX0_D, SEL_SCIF0_3), 1071 PINMUX_IPSR_MSEL(IP5_19_17, TX0_D, SEL_SCIF0_3),
1073 PINMUX_IPSR_MODSEL_DATA(IP5_19_17, STP_ISD_0_B, SEL_SSP_1), 1072 PINMUX_IPSR_MSEL(IP5_19_17, STP_ISD_0_B, SEL_SSP_1),
1074 PINMUX_IPSR_MODSEL_DATA(IP5_19_17, GLO_RFON, SEL_GPS_0), 1073 PINMUX_IPSR_MSEL(IP5_19_17, GLO_RFON, SEL_GPS_0),
1075 PINMUX_IPSR_MODSEL_DATA(IP5_21_20, SSI_SDATA7, SEL_SSI7_0), 1074 PINMUX_IPSR_MSEL(IP5_21_20, SSI_SDATA7, SEL_SSI7_0),
1076 PINMUX_IPSR_MODSEL_DATA(IP5_21_20, RX0_D, SEL_SCIF0_3), 1075 PINMUX_IPSR_MSEL(IP5_21_20, RX0_D, SEL_SCIF0_3),
1077 PINMUX_IPSR_MODSEL_DATA(IP5_21_20, STP_ISEN_0_B, SEL_SSP_1), 1076 PINMUX_IPSR_MSEL(IP5_21_20, STP_ISEN_0_B, SEL_SSP_1),
1078 PINMUX_IPSR_MODSEL_DATA(IP5_23_22, SSI_SDATA8, SEL_SSI8_0), 1077 PINMUX_IPSR_MSEL(IP5_23_22, SSI_SDATA8, SEL_SSI8_0),
1079 PINMUX_IPSR_MODSEL_DATA(IP5_23_22, TX1_D, SEL_SCIF1_3), 1078 PINMUX_IPSR_MSEL(IP5_23_22, TX1_D, SEL_SCIF1_3),
1080 PINMUX_IPSR_MODSEL_DATA(IP5_23_22, STP_ISSYNC_0_B, SEL_SSP_1), 1079 PINMUX_IPSR_MSEL(IP5_23_22, STP_ISSYNC_0_B, SEL_SSP_1),
1081 PINMUX_IPSR_MODSEL_DATA(IP5_25_24, SSI_SCK9, SEL_SSI9_0), 1080 PINMUX_IPSR_MSEL(IP5_25_24, SSI_SCK9, SEL_SSI9_0),
1082 PINMUX_IPSR_MODSEL_DATA(IP5_25_24, RX1_D, SEL_SCIF1_3), 1081 PINMUX_IPSR_MSEL(IP5_25_24, RX1_D, SEL_SCIF1_3),
1083 PINMUX_IPSR_MODSEL_DATA(IP5_25_24, GLO_SCLK_D, SEL_GPS_3), 1082 PINMUX_IPSR_MSEL(IP5_25_24, GLO_SCLK_D, SEL_GPS_3),
1084 PINMUX_IPSR_MODSEL_DATA(IP5_28_26, SSI_WS9, SEL_SSI9_0), 1083 PINMUX_IPSR_MSEL(IP5_28_26, SSI_WS9, SEL_SSI9_0),
1085 PINMUX_IPSR_MODSEL_DATA(IP5_28_26, TX3_D, SEL_SCIF3_3), 1084 PINMUX_IPSR_MSEL(IP5_28_26, TX3_D, SEL_SCIF3_3),
1086 PINMUX_IPSR_MODSEL_DATA(IP5_28_26, CAN0_TX_D, SEL_CAN0_3), 1085 PINMUX_IPSR_MSEL(IP5_28_26, CAN0_TX_D, SEL_CAN0_3),
1087 PINMUX_IPSR_MODSEL_DATA(IP5_28_26, GLO_SDATA_D, SEL_GPS_3), 1086 PINMUX_IPSR_MSEL(IP5_28_26, GLO_SDATA_D, SEL_GPS_3),
1088 PINMUX_IPSR_MODSEL_DATA(IP5_31_29, SSI_SDATA9, SEL_SSI9_0), 1087 PINMUX_IPSR_MSEL(IP5_31_29, SSI_SDATA9, SEL_SSI9_0),
1089 PINMUX_IPSR_MODSEL_DATA(IP5_31_29, RX3_D, SEL_SCIF3_3), 1088 PINMUX_IPSR_MSEL(IP5_31_29, RX3_D, SEL_SCIF3_3),
1090 PINMUX_IPSR_MODSEL_DATA(IP5_31_29, CAN0_RX_D, SEL_CAN0_3), 1089 PINMUX_IPSR_MSEL(IP5_31_29, CAN0_RX_D, SEL_CAN0_3),
1091 1090
1092 /* IPSR6 */ 1091 /* IPSR6 */
1093 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, AUDIO_CLKB, SEL_ADG_0), 1092 PINMUX_IPSR_MSEL(IP6_2_0, AUDIO_CLKB, SEL_ADG_0),
1094 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, STP_OPWM_0_B, SEL_SSP_1), 1093 PINMUX_IPSR_MSEL(IP6_2_0, STP_OPWM_0_B, SEL_SSP_1),
1095 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1), 1094 PINMUX_IPSR_MSEL(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
1096 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SCIF_CLK, SEL_SCIF_0), 1095 PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
1097 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, BPFCLK_E, SEL_FM_4), 1096 PINMUX_IPSR_MSEL(IP6_2_0, BPFCLK_E, SEL_FM_4),
1098 PINMUX_IPSR_DATA(IP6_5_3, AUDIO_CLKC), 1097 PINMUX_IPSR_DATA(IP6_5_3, AUDIO_CLKC),
1099 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2), 1098 PINMUX_IPSR_MSEL(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
1100 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, MSIOF1_SYNC_B, SEL_SOF1_1), 1099 PINMUX_IPSR_MSEL(IP6_5_3, MSIOF1_SYNC_B, SEL_SOF1_1),
1101 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, RX2, SEL_SCIF2_0), 1100 PINMUX_IPSR_MSEL(IP6_5_3, RX2, SEL_SCIF2_0),
1102 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCIFA2_RXD, SEL_SCIFA2_0), 1101 PINMUX_IPSR_MSEL(IP6_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
1103 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, FMIN_E, SEL_FM_4), 1102 PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
1104 PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT), 1103 PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
1105 PINMUX_IPSR_MODSEL_DATA(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1), 1104 PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
1106 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, TX2, SEL_SCIF2_0), 1105 PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
1107 PINMUX_IPSR_MODSEL_DATA(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0), 1106 PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
1108 PINMUX_IPSR_DATA(IP6_9_8, IRQ0), 1107 PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
1109 PINMUX_IPSR_MODSEL_DATA(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3), 1108 PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
1110 PINMUX_IPSR_DATA(IP6_9_8, INTC_IRQ0_N), 1109 PINMUX_IPSR_DATA(IP6_9_8, INTC_IRQ0_N),
1111 PINMUX_IPSR_DATA(IP6_11_10, IRQ1), 1110 PINMUX_IPSR_DATA(IP6_11_10, IRQ1),
1112 PINMUX_IPSR_MODSEL_DATA(IP6_11_10, SCIFB1_SCK_C, SEL_SCIFB1_2), 1111 PINMUX_IPSR_MSEL(IP6_11_10, SCIFB1_SCK_C, SEL_SCIFB1_2),
1113 PINMUX_IPSR_DATA(IP6_11_10, INTC_IRQ1_N), 1112 PINMUX_IPSR_DATA(IP6_11_10, INTC_IRQ1_N),
1114 PINMUX_IPSR_DATA(IP6_13_12, IRQ2), 1113 PINMUX_IPSR_DATA(IP6_13_12, IRQ2),
1115 PINMUX_IPSR_MODSEL_DATA(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3), 1114 PINMUX_IPSR_MSEL(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3),
1116 PINMUX_IPSR_DATA(IP6_13_12, INTC_IRQ2_N), 1115 PINMUX_IPSR_DATA(IP6_13_12, INTC_IRQ2_N),
1117 PINMUX_IPSR_DATA(IP6_15_14, IRQ3), 1116 PINMUX_IPSR_DATA(IP6_15_14, IRQ3),
1118 PINMUX_IPSR_MODSEL_DATA(IP6_15_14, SCL4_C, SEL_IIC4_2), 1117 PINMUX_IPSR_MSEL(IP6_15_14, SCL4_C, SEL_IIC4_2),
1119 PINMUX_IPSR_MODSEL_DATA(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4), 1118 PINMUX_IPSR_MSEL(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4),
1120 PINMUX_IPSR_DATA(IP6_15_14, INTC_IRQ4_N), 1119 PINMUX_IPSR_DATA(IP6_15_14, INTC_IRQ4_N),
1121 PINMUX_IPSR_DATA(IP6_18_16, IRQ4), 1120 PINMUX_IPSR_DATA(IP6_18_16, IRQ4),
1122 PINMUX_IPSR_MODSEL_DATA(IP6_18_16, HRX1_C, SEL_HSCIF1_2), 1121 PINMUX_IPSR_MSEL(IP6_18_16, HRX1_C, SEL_HSCIF1_2),
1123 PINMUX_IPSR_MODSEL_DATA(IP6_18_16, SDA4_C, SEL_IIC4_2), 1122 PINMUX_IPSR_MSEL(IP6_18_16, SDA4_C, SEL_IIC4_2),
1124 PINMUX_IPSR_MODSEL_DATA(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4), 1123 PINMUX_IPSR_MSEL(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4),
1125 PINMUX_IPSR_DATA(IP6_18_16, INTC_IRQ4_N), 1124 PINMUX_IPSR_DATA(IP6_18_16, INTC_IRQ4_N),
1126 PINMUX_IPSR_DATA(IP6_20_19, IRQ5), 1125 PINMUX_IPSR_DATA(IP6_20_19, IRQ5),
1127 PINMUX_IPSR_MODSEL_DATA(IP6_20_19, HTX1_C, SEL_HSCIF1_2), 1126 PINMUX_IPSR_MSEL(IP6_20_19, HTX1_C, SEL_HSCIF1_2),
1128 PINMUX_IPSR_MODSEL_DATA(IP6_20_19, SCL1_E, SEL_IIC1_4), 1127 PINMUX_IPSR_MSEL(IP6_20_19, SCL1_E, SEL_IIC1_4),
1129 PINMUX_IPSR_MODSEL_DATA(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4), 1128 PINMUX_IPSR_MSEL(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4),
1130 PINMUX_IPSR_DATA(IP6_23_21, IRQ6), 1129 PINMUX_IPSR_DATA(IP6_23_21, IRQ6),
1131 PINMUX_IPSR_MODSEL_DATA(IP6_23_21, HSCK1_C, SEL_HSCIF1_2), 1130 PINMUX_IPSR_MSEL(IP6_23_21, HSCK1_C, SEL_HSCIF1_2),
1132 PINMUX_IPSR_MODSEL_DATA(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1), 1131 PINMUX_IPSR_MSEL(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1),
1133 PINMUX_IPSR_MODSEL_DATA(IP6_23_21, SDA1_E, SEL_IIC1_4), 1132 PINMUX_IPSR_MSEL(IP6_23_21, SDA1_E, SEL_IIC1_4),
1134 PINMUX_IPSR_MODSEL_DATA(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4), 1133 PINMUX_IPSR_MSEL(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4),
1135 PINMUX_IPSR_DATA(IP6_26_24, IRQ7), 1134 PINMUX_IPSR_DATA(IP6_26_24, IRQ7),
1136 PINMUX_IPSR_MODSEL_DATA(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2), 1135 PINMUX_IPSR_MSEL(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2),
1137 PINMUX_IPSR_MODSEL_DATA(IP6_26_24, MSIOF1_TXD_B, SEL_SOF1_1), 1136 PINMUX_IPSR_MSEL(IP6_26_24, MSIOF1_TXD_B, SEL_SOF1_1),
1138 PINMUX_IPSR_MODSEL_DATA(IP6_26_24, GPS_CLK_C, SEL_GPS_2), 1137 PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_C, SEL_GPS_2),
1139 PINMUX_IPSR_MODSEL_DATA(IP6_26_24, GPS_CLK_D, SEL_GPS_3), 1138 PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_D, SEL_GPS_3),
1140 PINMUX_IPSR_DATA(IP6_29_27, IRQ8), 1139 PINMUX_IPSR_DATA(IP6_29_27, IRQ8),
1141 PINMUX_IPSR_MODSEL_DATA(IP6_29_27, HRTS1_N_C, SEL_HSCIF1_2), 1140 PINMUX_IPSR_MSEL(IP6_29_27, HRTS1_N_C, SEL_HSCIF1_2),
1142 PINMUX_IPSR_MODSEL_DATA(IP6_29_27, MSIOF1_RXD_B, SEL_SOF1_1), 1141 PINMUX_IPSR_MSEL(IP6_29_27, MSIOF1_RXD_B, SEL_SOF1_1),
1143 PINMUX_IPSR_MODSEL_DATA(IP6_29_27, GPS_SIGN_C, SEL_GPS_2), 1142 PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_C, SEL_GPS_2),
1144 PINMUX_IPSR_MODSEL_DATA(IP6_29_27, GPS_SIGN_D, SEL_GPS_3), 1143 PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_D, SEL_GPS_3),
1145 1144
1146 /* IPSR7 */ 1145 /* IPSR7 */
1147 PINMUX_IPSR_DATA(IP7_2_0, IRQ9), 1146 PINMUX_IPSR_DATA(IP7_2_0, IRQ9),
1148 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, DU1_DOTCLKIN_B, SEL_DIS_1), 1147 PINMUX_IPSR_MSEL(IP7_2_0, DU1_DOTCLKIN_B, SEL_DIS_1),
1149 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, CAN_CLK_D, SEL_CANCLK_3), 1148 PINMUX_IPSR_MSEL(IP7_2_0, CAN_CLK_D, SEL_CANCLK_3),
1150 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, GPS_MAG_C, SEL_GPS_2), 1149 PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_C, SEL_GPS_2),
1151 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, SCIF_CLK_B, SEL_SCIF_1), 1150 PINMUX_IPSR_MSEL(IP7_2_0, SCIF_CLK_B, SEL_SCIF_1),
1152 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, GPS_MAG_D, SEL_GPS_3), 1151 PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_D, SEL_GPS_3),
1153 PINMUX_IPSR_DATA(IP7_5_3, DU1_DR0), 1152 PINMUX_IPSR_DATA(IP7_5_3, DU1_DR0),
1154 PINMUX_IPSR_DATA(IP7_5_3, LCDOUT0), 1153 PINMUX_IPSR_DATA(IP7_5_3, LCDOUT0),
1155 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, VI1_DATA0_B, SEL_VI1_1), 1154 PINMUX_IPSR_MSEL(IP7_5_3, VI1_DATA0_B, SEL_VI1_1),
1156 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, TX0_B, SEL_SCIF0_1), 1155 PINMUX_IPSR_MSEL(IP7_5_3, TX0_B, SEL_SCIF0_1),
1157 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, SCIFA0_TXD_B, SEL_SCFA_1), 1156 PINMUX_IPSR_MSEL(IP7_5_3, SCIFA0_TXD_B, SEL_SCFA_1),
1158 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, MSIOF2_SCK_B, SEL_SOF2_1), 1157 PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_SOF2_1),
1159 PINMUX_IPSR_DATA(IP7_8_6, DU1_DR1), 1158 PINMUX_IPSR_DATA(IP7_8_6, DU1_DR1),
1160 PINMUX_IPSR_DATA(IP7_8_6, LCDOUT1), 1159 PINMUX_IPSR_DATA(IP7_8_6, LCDOUT1),
1161 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, VI1_DATA1_B, SEL_VI1_1), 1160 PINMUX_IPSR_MSEL(IP7_8_6, VI1_DATA1_B, SEL_VI1_1),
1162 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RX0_B, SEL_SCIF0_1), 1161 PINMUX_IPSR_MSEL(IP7_8_6, RX0_B, SEL_SCIF0_1),
1163 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, SCIFA0_RXD_B, SEL_SCFA_1), 1162 PINMUX_IPSR_MSEL(IP7_8_6, SCIFA0_RXD_B, SEL_SCFA_1),
1164 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, MSIOF2_SYNC_B, SEL_SOF2_1), 1163 PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_SOF2_1),
1165 PINMUX_IPSR_DATA(IP7_10_9, DU1_DR2), 1164 PINMUX_IPSR_DATA(IP7_10_9, DU1_DR2),
1166 PINMUX_IPSR_DATA(IP7_10_9, LCDOUT2), 1165 PINMUX_IPSR_DATA(IP7_10_9, LCDOUT2),
1167 PINMUX_IPSR_MODSEL_DATA(IP7_10_9, SSI_SCK0129_B, SEL_SSI0_1), 1166 PINMUX_IPSR_MSEL(IP7_10_9, SSI_SCK0129_B, SEL_SSI0_1),
1168 PINMUX_IPSR_DATA(IP7_12_11, DU1_DR3), 1167 PINMUX_IPSR_DATA(IP7_12_11, DU1_DR3),
1169 PINMUX_IPSR_DATA(IP7_12_11, LCDOUT3), 1168 PINMUX_IPSR_DATA(IP7_12_11, LCDOUT3),
1170 PINMUX_IPSR_MODSEL_DATA(IP7_12_11, SSI_WS0129_B, SEL_SSI0_1), 1169 PINMUX_IPSR_MSEL(IP7_12_11, SSI_WS0129_B, SEL_SSI0_1),
1171 PINMUX_IPSR_DATA(IP7_14_13, DU1_DR4), 1170 PINMUX_IPSR_DATA(IP7_14_13, DU1_DR4),
1172 PINMUX_IPSR_DATA(IP7_14_13, LCDOUT4), 1171 PINMUX_IPSR_DATA(IP7_14_13, LCDOUT4),
1173 PINMUX_IPSR_MODSEL_DATA(IP7_14_13, SSI_SDATA0_B, SEL_SSI0_1), 1172 PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA0_B, SEL_SSI0_1),
1174 PINMUX_IPSR_DATA(IP7_16_15, DU1_DR5), 1173 PINMUX_IPSR_DATA(IP7_16_15, DU1_DR5),
1175 PINMUX_IPSR_DATA(IP7_16_15, LCDOUT5), 1174 PINMUX_IPSR_DATA(IP7_16_15, LCDOUT5),
1176 PINMUX_IPSR_MODSEL_DATA(IP7_16_15, SSI_SCK1_B, SEL_SSI1_1), 1175 PINMUX_IPSR_MSEL(IP7_16_15, SSI_SCK1_B, SEL_SSI1_1),
1177 PINMUX_IPSR_DATA(IP7_18_17, DU1_DR6), 1176 PINMUX_IPSR_DATA(IP7_18_17, DU1_DR6),
1178 PINMUX_IPSR_DATA(IP7_18_17, LCDOUT6), 1177 PINMUX_IPSR_DATA(IP7_18_17, LCDOUT6),
1179 PINMUX_IPSR_MODSEL_DATA(IP7_18_17, SSI_WS1_B, SEL_SSI1_1), 1178 PINMUX_IPSR_MSEL(IP7_18_17, SSI_WS1_B, SEL_SSI1_1),
1180 PINMUX_IPSR_DATA(IP7_20_19, DU1_DR7), 1179 PINMUX_IPSR_DATA(IP7_20_19, DU1_DR7),
1181 PINMUX_IPSR_DATA(IP7_20_19, LCDOUT7), 1180 PINMUX_IPSR_DATA(IP7_20_19, LCDOUT7),
1182 PINMUX_IPSR_MODSEL_DATA(IP7_20_19, SSI_SDATA1_B, SEL_SSI1_1), 1181 PINMUX_IPSR_MSEL(IP7_20_19, SSI_SDATA1_B, SEL_SSI1_1),
1183 PINMUX_IPSR_DATA(IP7_23_21, DU1_DG0), 1182 PINMUX_IPSR_DATA(IP7_23_21, DU1_DG0),
1184 PINMUX_IPSR_DATA(IP7_23_21, LCDOUT8), 1183 PINMUX_IPSR_DATA(IP7_23_21, LCDOUT8),
1185 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, VI1_DATA2_B, SEL_VI1_1), 1184 PINMUX_IPSR_MSEL(IP7_23_21, VI1_DATA2_B, SEL_VI1_1),
1186 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TX1_B, SEL_SCIF1_1), 1185 PINMUX_IPSR_MSEL(IP7_23_21, TX1_B, SEL_SCIF1_1),
1187 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, SCIFA1_TXD_B, SEL_SCIFA1_1), 1186 PINMUX_IPSR_MSEL(IP7_23_21, SCIFA1_TXD_B, SEL_SCIFA1_1),
1188 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, MSIOF2_SS1_B, SEL_SOF2_1), 1187 PINMUX_IPSR_MSEL(IP7_23_21, MSIOF2_SS1_B, SEL_SOF2_1),
1189 PINMUX_IPSR_DATA(IP7_26_24, DU1_DG1), 1188 PINMUX_IPSR_DATA(IP7_26_24, DU1_DG1),
1190 PINMUX_IPSR_DATA(IP7_26_24, LCDOUT9), 1189 PINMUX_IPSR_DATA(IP7_26_24, LCDOUT9),
1191 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, VI1_DATA3_B, SEL_VI1_1), 1190 PINMUX_IPSR_MSEL(IP7_26_24, VI1_DATA3_B, SEL_VI1_1),
1192 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RX1_B, SEL_SCIF1_1), 1191 PINMUX_IPSR_MSEL(IP7_26_24, RX1_B, SEL_SCIF1_1),
1193 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, SCIFA1_RXD_B, SEL_SCIFA1_1), 1192 PINMUX_IPSR_MSEL(IP7_26_24, SCIFA1_RXD_B, SEL_SCIFA1_1),
1194 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, MSIOF2_SS2_B, SEL_SOF2_1), 1193 PINMUX_IPSR_MSEL(IP7_26_24, MSIOF2_SS2_B, SEL_SOF2_1),
1195 PINMUX_IPSR_DATA(IP7_29_27, DU1_DG2), 1194 PINMUX_IPSR_DATA(IP7_29_27, DU1_DG2),
1196 PINMUX_IPSR_DATA(IP7_29_27, LCDOUT10), 1195 PINMUX_IPSR_DATA(IP7_29_27, LCDOUT10),
1197 PINMUX_IPSR_MODSEL_DATA(IP7_29_27, VI1_DATA4_B, SEL_VI1_1), 1196 PINMUX_IPSR_MSEL(IP7_29_27, VI1_DATA4_B, SEL_VI1_1),
1198 PINMUX_IPSR_DATA(IP7_29_27, SCIF1_SCK_B), 1197 PINMUX_IPSR_DATA(IP7_29_27, SCIF1_SCK_B),
1199 PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SCIFA1_SCK, SEL_SCIFA1_0), 1198 PINMUX_IPSR_MSEL(IP7_29_27, SCIFA1_SCK, SEL_SCIFA1_0),
1200 PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SSI_SCK78_B, SEL_SSI7_1), 1199 PINMUX_IPSR_MSEL(IP7_29_27, SSI_SCK78_B, SEL_SSI7_1),
1201 1200
1202 /* IPSR8 */ 1201 /* IPSR8 */
1203 PINMUX_IPSR_DATA(IP8_2_0, DU1_DG3), 1202 PINMUX_IPSR_DATA(IP8_2_0, DU1_DG3),
1204 PINMUX_IPSR_DATA(IP8_2_0, LCDOUT11), 1203 PINMUX_IPSR_DATA(IP8_2_0, LCDOUT11),
1205 PINMUX_IPSR_MODSEL_DATA(IP8_2_0, VI1_DATA5_B, SEL_VI1_1), 1204 PINMUX_IPSR_MSEL(IP8_2_0, VI1_DATA5_B, SEL_VI1_1),
1206 PINMUX_IPSR_MODSEL_DATA(IP8_2_0, SSI_WS78_B, SEL_SSI7_1), 1205 PINMUX_IPSR_MSEL(IP8_2_0, SSI_WS78_B, SEL_SSI7_1),
1207 PINMUX_IPSR_DATA(IP8_5_3, DU1_DG4), 1206 PINMUX_IPSR_DATA(IP8_5_3, DU1_DG4),
1208 PINMUX_IPSR_DATA(IP8_5_3, LCDOUT12), 1207 PINMUX_IPSR_DATA(IP8_5_3, LCDOUT12),
1209 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, VI1_DATA6_B, SEL_VI1_1), 1208 PINMUX_IPSR_MSEL(IP8_5_3, VI1_DATA6_B, SEL_VI1_1),
1210 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, HRX0_B, SEL_HSCIF0_1), 1209 PINMUX_IPSR_MSEL(IP8_5_3, HRX0_B, SEL_HSCIF0_1),
1211 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SCIFB2_RXD_B, SEL_SCIFB2_1), 1210 PINMUX_IPSR_MSEL(IP8_5_3, SCIFB2_RXD_B, SEL_SCIFB2_1),
1212 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SSI_SDATA7_B, SEL_SSI7_1), 1211 PINMUX_IPSR_MSEL(IP8_5_3, SSI_SDATA7_B, SEL_SSI7_1),
1213 PINMUX_IPSR_DATA(IP8_8_6, DU1_DG5), 1212 PINMUX_IPSR_DATA(IP8_8_6, DU1_DG5),
1214 PINMUX_IPSR_DATA(IP8_8_6, LCDOUT13), 1213 PINMUX_IPSR_DATA(IP8_8_6, LCDOUT13),
1215 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, VI1_DATA7_B, SEL_VI1_1), 1214 PINMUX_IPSR_MSEL(IP8_8_6, VI1_DATA7_B, SEL_VI1_1),
1216 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, HCTS0_N_B, SEL_HSCIF0_1), 1215 PINMUX_IPSR_MSEL(IP8_8_6, HCTS0_N_B, SEL_HSCIF0_1),
1217 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SCIFB2_TXD_B, SEL_SCIFB2_1), 1216 PINMUX_IPSR_MSEL(IP8_8_6, SCIFB2_TXD_B, SEL_SCIFB2_1),
1218 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SSI_SDATA8_B, SEL_SSI8_1), 1217 PINMUX_IPSR_MSEL(IP8_8_6, SSI_SDATA8_B, SEL_SSI8_1),
1219 PINMUX_IPSR_DATA(IP8_11_9, DU1_DG6), 1218 PINMUX_IPSR_DATA(IP8_11_9, DU1_DG6),
1220 PINMUX_IPSR_DATA(IP8_11_9, LCDOUT14), 1219 PINMUX_IPSR_DATA(IP8_11_9, LCDOUT14),
1221 PINMUX_IPSR_MODSEL_DATA(IP8_11_9, HRTS0_N_B, SEL_HSCIF0_1), 1220 PINMUX_IPSR_MSEL(IP8_11_9, HRTS0_N_B, SEL_HSCIF0_1),
1222 PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SCIFB2_CTS_N_B, SEL_SCIFB2_1), 1221 PINMUX_IPSR_MSEL(IP8_11_9, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
1223 PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SSI_SCK9_B, SEL_SSI9_1), 1222 PINMUX_IPSR_MSEL(IP8_11_9, SSI_SCK9_B, SEL_SSI9_1),
1224 PINMUX_IPSR_DATA(IP8_14_12, DU1_DG7), 1223 PINMUX_IPSR_DATA(IP8_14_12, DU1_DG7),
1225 PINMUX_IPSR_DATA(IP8_14_12, LCDOUT15), 1224 PINMUX_IPSR_DATA(IP8_14_12, LCDOUT15),
1226 PINMUX_IPSR_MODSEL_DATA(IP8_14_12, HTX0_B, SEL_HSCIF0_1), 1225 PINMUX_IPSR_MSEL(IP8_14_12, HTX0_B, SEL_HSCIF0_1),
1227 PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SCIFB2_RTS_N_B, SEL_SCIFB2_1), 1226 PINMUX_IPSR_MSEL(IP8_14_12, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
1228 PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SSI_WS9_B, SEL_SSI9_1), 1227 PINMUX_IPSR_MSEL(IP8_14_12, SSI_WS9_B, SEL_SSI9_1),
1229 PINMUX_IPSR_DATA(IP8_17_15, DU1_DB0), 1228 PINMUX_IPSR_DATA(IP8_17_15, DU1_DB0),
1230 PINMUX_IPSR_DATA(IP8_17_15, LCDOUT16), 1229 PINMUX_IPSR_DATA(IP8_17_15, LCDOUT16),
1231 PINMUX_IPSR_MODSEL_DATA(IP8_17_15, VI1_CLK_B, SEL_VI1_1), 1230 PINMUX_IPSR_MSEL(IP8_17_15, VI1_CLK_B, SEL_VI1_1),
1232 PINMUX_IPSR_MODSEL_DATA(IP8_17_15, TX2_B, SEL_SCIF2_1), 1231 PINMUX_IPSR_MSEL(IP8_17_15, TX2_B, SEL_SCIF2_1),
1233 PINMUX_IPSR_MODSEL_DATA(IP8_17_15, SCIFA2_TXD_B, SEL_SCIFA2_1), 1232 PINMUX_IPSR_MSEL(IP8_17_15, SCIFA2_TXD_B, SEL_SCIFA2_1),
1234 PINMUX_IPSR_MODSEL_DATA(IP8_17_15, MSIOF2_TXD_B, SEL_SOF2_1), 1233 PINMUX_IPSR_MSEL(IP8_17_15, MSIOF2_TXD_B, SEL_SOF2_1),
1235 PINMUX_IPSR_DATA(IP8_20_18, DU1_DB1), 1234 PINMUX_IPSR_DATA(IP8_20_18, DU1_DB1),
1236 PINMUX_IPSR_DATA(IP8_20_18, LCDOUT17), 1235 PINMUX_IPSR_DATA(IP8_20_18, LCDOUT17),
1237 PINMUX_IPSR_MODSEL_DATA(IP8_20_18, VI1_HSYNC_N_B, SEL_VI1_1), 1236 PINMUX_IPSR_MSEL(IP8_20_18, VI1_HSYNC_N_B, SEL_VI1_1),
1238 PINMUX_IPSR_MODSEL_DATA(IP8_20_18, RX2_B, SEL_SCIF2_1), 1237 PINMUX_IPSR_MSEL(IP8_20_18, RX2_B, SEL_SCIF2_1),
1239 PINMUX_IPSR_MODSEL_DATA(IP8_20_18, SCIFA2_RXD_B, SEL_SCIFA2_1), 1238 PINMUX_IPSR_MSEL(IP8_20_18, SCIFA2_RXD_B, SEL_SCIFA2_1),
1240 PINMUX_IPSR_MODSEL_DATA(IP8_20_18, MSIOF2_RXD_B, SEL_SOF2_1), 1239 PINMUX_IPSR_MSEL(IP8_20_18, MSIOF2_RXD_B, SEL_SOF2_1),
1241 PINMUX_IPSR_DATA(IP8_23_21, DU1_DB2), 1240 PINMUX_IPSR_DATA(IP8_23_21, DU1_DB2),
1242 PINMUX_IPSR_DATA(IP8_23_21, LCDOUT18), 1241 PINMUX_IPSR_DATA(IP8_23_21, LCDOUT18),
1243 PINMUX_IPSR_MODSEL_DATA(IP8_23_21, VI1_VSYNC_N_B, SEL_VI1_1), 1242 PINMUX_IPSR_MSEL(IP8_23_21, VI1_VSYNC_N_B, SEL_VI1_1),
1244 PINMUX_IPSR_DATA(IP8_23_21, SCIF2_SCK_B), 1243 PINMUX_IPSR_DATA(IP8_23_21, SCIF2_SCK_B),
1245 PINMUX_IPSR_MODSEL_DATA(IP8_23_21, SCIFA2_SCK, SEL_SCIFA2_1), 1244 PINMUX_IPSR_MSEL(IP8_23_21, SCIFA2_SCK, SEL_SCIFA2_1),
1246 PINMUX_IPSR_MODSEL_DATA(IP8_23_21, SSI_SDATA9_B, SEL_SSI9_1), 1245 PINMUX_IPSR_MSEL(IP8_23_21, SSI_SDATA9_B, SEL_SSI9_1),
1247 PINMUX_IPSR_DATA(IP8_25_24, DU1_DB3), 1246 PINMUX_IPSR_DATA(IP8_25_24, DU1_DB3),
1248 PINMUX_IPSR_DATA(IP8_25_24, LCDOUT19), 1247 PINMUX_IPSR_DATA(IP8_25_24, LCDOUT19),
1249 PINMUX_IPSR_MODSEL_DATA(IP8_25_24, VI1_CLKENB_B, SEL_VI1_1), 1248 PINMUX_IPSR_MSEL(IP8_25_24, VI1_CLKENB_B, SEL_VI1_1),
1250 PINMUX_IPSR_DATA(IP8_27_26, DU1_DB4), 1249 PINMUX_IPSR_DATA(IP8_27_26, DU1_DB4),
1251 PINMUX_IPSR_DATA(IP8_27_26, LCDOUT20), 1250 PINMUX_IPSR_DATA(IP8_27_26, LCDOUT20),
1252 PINMUX_IPSR_MODSEL_DATA(IP8_27_26, VI1_FIELD_B, SEL_VI1_1), 1251 PINMUX_IPSR_MSEL(IP8_27_26, VI1_FIELD_B, SEL_VI1_1),
1253 PINMUX_IPSR_MODSEL_DATA(IP8_27_26, CAN1_RX, SEL_CAN1_0), 1252 PINMUX_IPSR_MSEL(IP8_27_26, CAN1_RX, SEL_CAN1_0),
1254 PINMUX_IPSR_DATA(IP8_30_28, DU1_DB5), 1253 PINMUX_IPSR_DATA(IP8_30_28, DU1_DB5),
1255 PINMUX_IPSR_DATA(IP8_30_28, LCDOUT21), 1254 PINMUX_IPSR_DATA(IP8_30_28, LCDOUT21),
1256 PINMUX_IPSR_MODSEL_DATA(IP8_30_28, TX3, SEL_SCIF3_0), 1255 PINMUX_IPSR_MSEL(IP8_30_28, TX3, SEL_SCIF3_0),
1257 PINMUX_IPSR_MODSEL_DATA(IP8_30_28, SCIFA3_TXD, SEL_SCIFA3_0), 1256 PINMUX_IPSR_MSEL(IP8_30_28, SCIFA3_TXD, SEL_SCIFA3_0),
1258 PINMUX_IPSR_MODSEL_DATA(IP8_30_28, CAN1_TX, SEL_CAN1_0), 1257 PINMUX_IPSR_MSEL(IP8_30_28, CAN1_TX, SEL_CAN1_0),
1259 1258
1260 /* IPSR9 */ 1259 /* IPSR9 */
1261 PINMUX_IPSR_DATA(IP9_2_0, DU1_DB6), 1260 PINMUX_IPSR_DATA(IP9_2_0, DU1_DB6),
1262 PINMUX_IPSR_DATA(IP9_2_0, LCDOUT22), 1261 PINMUX_IPSR_DATA(IP9_2_0, LCDOUT22),
1263 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCL3_C, SEL_IIC3_2), 1262 PINMUX_IPSR_MSEL(IP9_2_0, SCL3_C, SEL_IIC3_2),
1264 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, RX3, SEL_SCIF3_0), 1263 PINMUX_IPSR_MSEL(IP9_2_0, RX3, SEL_SCIF3_0),
1265 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0), 1264 PINMUX_IPSR_MSEL(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
1266 PINMUX_IPSR_DATA(IP9_5_3, DU1_DB7), 1265 PINMUX_IPSR_DATA(IP9_5_3, DU1_DB7),
1267 PINMUX_IPSR_DATA(IP9_5_3, LCDOUT23), 1266 PINMUX_IPSR_DATA(IP9_5_3, LCDOUT23),
1268 PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SDA3_C, SEL_IIC3_2), 1267 PINMUX_IPSR_MSEL(IP9_5_3, SDA3_C, SEL_IIC3_2),
1269 PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0), 1268 PINMUX_IPSR_MSEL(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0),
1270 PINMUX_IPSR_MODSEL_DATA(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0), 1269 PINMUX_IPSR_MSEL(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0),
1271 PINMUX_IPSR_MODSEL_DATA(IP9_6, DU1_DOTCLKIN, SEL_DIS_0), 1270 PINMUX_IPSR_MSEL(IP9_6, DU1_DOTCLKIN, SEL_DIS_0),
1272 PINMUX_IPSR_DATA(IP9_6, QSTVA_QVS), 1271 PINMUX_IPSR_DATA(IP9_6, QSTVA_QVS),
1273 PINMUX_IPSR_DATA(IP9_7, DU1_DOTCLKOUT0), 1272 PINMUX_IPSR_DATA(IP9_7, DU1_DOTCLKOUT0),
1274 PINMUX_IPSR_DATA(IP9_7, QCLK), 1273 PINMUX_IPSR_DATA(IP9_7, QCLK),
1275 PINMUX_IPSR_DATA(IP9_10_8, DU1_DOTCLKOUT1), 1274 PINMUX_IPSR_DATA(IP9_10_8, DU1_DOTCLKOUT1),
1276 PINMUX_IPSR_DATA(IP9_10_8, QSTVB_QVE), 1275 PINMUX_IPSR_DATA(IP9_10_8, QSTVB_QVE),
1277 PINMUX_IPSR_MODSEL_DATA(IP9_10_8, CAN0_TX, SEL_CAN0_0), 1276 PINMUX_IPSR_MSEL(IP9_10_8, CAN0_TX, SEL_CAN0_0),
1278 PINMUX_IPSR_MODSEL_DATA(IP9_10_8, TX3_B, SEL_SCIF3_1), 1277 PINMUX_IPSR_MSEL(IP9_10_8, TX3_B, SEL_SCIF3_1),
1279 PINMUX_IPSR_MODSEL_DATA(IP9_10_8, SCL2_B, SEL_IIC2_1), 1278 PINMUX_IPSR_MSEL(IP9_10_8, SCL2_B, SEL_IIC2_1),
1280 PINMUX_IPSR_DATA(IP9_10_8, PWM4), 1279 PINMUX_IPSR_DATA(IP9_10_8, PWM4),
1281 PINMUX_IPSR_DATA(IP9_11, DU1_EXHSYNC_DU1_HSYNC), 1280 PINMUX_IPSR_DATA(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
1282 PINMUX_IPSR_DATA(IP9_11, QSTH_QHS), 1281 PINMUX_IPSR_DATA(IP9_11, QSTH_QHS),
@@ -1284,280 +1283,280 @@ static const u16 pinmux_data[] = {
1284 PINMUX_IPSR_DATA(IP9_12, QSTB_QHE), 1283 PINMUX_IPSR_DATA(IP9_12, QSTB_QHE),
1285 PINMUX_IPSR_DATA(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE), 1284 PINMUX_IPSR_DATA(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE),
1286 PINMUX_IPSR_DATA(IP9_15_13, QCPV_QDE), 1285 PINMUX_IPSR_DATA(IP9_15_13, QCPV_QDE),
1287 PINMUX_IPSR_MODSEL_DATA(IP9_15_13, CAN0_RX, SEL_CAN0_0), 1286 PINMUX_IPSR_MSEL(IP9_15_13, CAN0_RX, SEL_CAN0_0),
1288 PINMUX_IPSR_MODSEL_DATA(IP9_15_13, RX3_B, SEL_SCIF3_1), 1287 PINMUX_IPSR_MSEL(IP9_15_13, RX3_B, SEL_SCIF3_1),
1289 PINMUX_IPSR_MODSEL_DATA(IP9_15_13, SDA2_B, SEL_IIC2_1), 1288 PINMUX_IPSR_MSEL(IP9_15_13, SDA2_B, SEL_IIC2_1),
1290 PINMUX_IPSR_DATA(IP9_16, DU1_DISP), 1289 PINMUX_IPSR_DATA(IP9_16, DU1_DISP),
1291 PINMUX_IPSR_DATA(IP9_16, QPOLA), 1290 PINMUX_IPSR_DATA(IP9_16, QPOLA),
1292 PINMUX_IPSR_DATA(IP9_18_17, DU1_CDE), 1291 PINMUX_IPSR_DATA(IP9_18_17, DU1_CDE),
1293 PINMUX_IPSR_DATA(IP9_18_17, QPOLB), 1292 PINMUX_IPSR_DATA(IP9_18_17, QPOLB),
1294 PINMUX_IPSR_DATA(IP9_18_17, PWM4_B), 1293 PINMUX_IPSR_DATA(IP9_18_17, PWM4_B),
1295 PINMUX_IPSR_DATA(IP9_20_19, VI0_CLKENB), 1294 PINMUX_IPSR_DATA(IP9_20_19, VI0_CLKENB),
1296 PINMUX_IPSR_MODSEL_DATA(IP9_20_19, TX4, SEL_SCIF4_0), 1295 PINMUX_IPSR_MSEL(IP9_20_19, TX4, SEL_SCIF4_0),
1297 PINMUX_IPSR_MODSEL_DATA(IP9_20_19, SCIFA4_TXD, SEL_SCIFA4_0), 1296 PINMUX_IPSR_MSEL(IP9_20_19, SCIFA4_TXD, SEL_SCIFA4_0),
1298 PINMUX_IPSR_MODSEL_DATA(IP9_20_19, TS_SDATA0_D, SEL_TSIF0_3), 1297 PINMUX_IPSR_MSEL(IP9_20_19, TS_SDATA0_D, SEL_TSIF0_3),
1299 PINMUX_IPSR_DATA(IP9_22_21, VI0_FIELD), 1298 PINMUX_IPSR_DATA(IP9_22_21, VI0_FIELD),
1300 PINMUX_IPSR_MODSEL_DATA(IP9_22_21, RX4, SEL_SCIF4_0), 1299 PINMUX_IPSR_MSEL(IP9_22_21, RX4, SEL_SCIF4_0),
1301 PINMUX_IPSR_MODSEL_DATA(IP9_22_21, SCIFA4_RXD, SEL_SCIFA4_0), 1300 PINMUX_IPSR_MSEL(IP9_22_21, SCIFA4_RXD, SEL_SCIFA4_0),
1302 PINMUX_IPSR_MODSEL_DATA(IP9_22_21, TS_SCK0_D, SEL_TSIF0_3), 1301 PINMUX_IPSR_MSEL(IP9_22_21, TS_SCK0_D, SEL_TSIF0_3),
1303 PINMUX_IPSR_DATA(IP9_24_23, VI0_HSYNC_N), 1302 PINMUX_IPSR_DATA(IP9_24_23, VI0_HSYNC_N),
1304 PINMUX_IPSR_MODSEL_DATA(IP9_24_23, TX5, SEL_SCIF5_0), 1303 PINMUX_IPSR_MSEL(IP9_24_23, TX5, SEL_SCIF5_0),
1305 PINMUX_IPSR_MODSEL_DATA(IP9_24_23, SCIFA5_TXD, SEL_SCIFA5_0), 1304 PINMUX_IPSR_MSEL(IP9_24_23, SCIFA5_TXD, SEL_SCIFA5_0),
1306 PINMUX_IPSR_MODSEL_DATA(IP9_24_23, TS_SDEN0_D, SEL_TSIF0_3), 1305 PINMUX_IPSR_MSEL(IP9_24_23, TS_SDEN0_D, SEL_TSIF0_3),
1307 PINMUX_IPSR_DATA(IP9_26_25, VI0_VSYNC_N), 1306 PINMUX_IPSR_DATA(IP9_26_25, VI0_VSYNC_N),
1308 PINMUX_IPSR_MODSEL_DATA(IP9_26_25, RX5, SEL_SCIF5_0), 1307 PINMUX_IPSR_MSEL(IP9_26_25, RX5, SEL_SCIF5_0),
1309 PINMUX_IPSR_MODSEL_DATA(IP9_26_25, SCIFA5_RXD, SEL_SCIFA5_0), 1308 PINMUX_IPSR_MSEL(IP9_26_25, SCIFA5_RXD, SEL_SCIFA5_0),
1310 PINMUX_IPSR_MODSEL_DATA(IP9_26_25, TS_SPSYNC0_D, SEL_TSIF0_3), 1309 PINMUX_IPSR_MSEL(IP9_26_25, TS_SPSYNC0_D, SEL_TSIF0_3),
1311 PINMUX_IPSR_DATA(IP9_28_27, VI0_DATA3_VI0_B3), 1310 PINMUX_IPSR_DATA(IP9_28_27, VI0_DATA3_VI0_B3),
1312 PINMUX_IPSR_MODSEL_DATA(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1), 1311 PINMUX_IPSR_MSEL(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1),
1313 PINMUX_IPSR_MODSEL_DATA(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1), 1312 PINMUX_IPSR_MSEL(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1),
1314 PINMUX_IPSR_DATA(IP9_31_29, VI0_G0), 1313 PINMUX_IPSR_DATA(IP9_31_29, VI0_G0),
1315 PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCL8, SEL_IIC8_0), 1314 PINMUX_IPSR_MSEL(IP9_31_29, SCL8, SEL_IIC8_0),
1316 PINMUX_IPSR_MODSEL_DATA(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2), 1315 PINMUX_IPSR_MSEL(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2),
1317 PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCL4, SEL_IIC4_0), 1316 PINMUX_IPSR_MSEL(IP9_31_29, SCL4, SEL_IIC4_0),
1318 PINMUX_IPSR_MODSEL_DATA(IP9_31_29, HCTS2_N, SEL_HSCIF2_0), 1317 PINMUX_IPSR_MSEL(IP9_31_29, HCTS2_N, SEL_HSCIF2_0),
1319 PINMUX_IPSR_MODSEL_DATA(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0), 1318 PINMUX_IPSR_MSEL(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0),
1320 PINMUX_IPSR_DATA(IP9_31_29, ATAWR1_N), 1319 PINMUX_IPSR_DATA(IP9_31_29, ATAWR1_N),
1321 1320
1322 /* IPSR10 */ 1321 /* IPSR10 */
1323 PINMUX_IPSR_DATA(IP10_2_0, VI0_G1), 1322 PINMUX_IPSR_DATA(IP10_2_0, VI0_G1),
1324 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SDA8, SEL_IIC8_0), 1323 PINMUX_IPSR_MSEL(IP10_2_0, SDA8, SEL_IIC8_0),
1325 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2), 1324 PINMUX_IPSR_MSEL(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2),
1326 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SDA4, SEL_IIC4_0), 1325 PINMUX_IPSR_MSEL(IP10_2_0, SDA4, SEL_IIC4_0),
1327 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, HRTS2_N, SEL_HSCIF2_0), 1326 PINMUX_IPSR_MSEL(IP10_2_0, HRTS2_N, SEL_HSCIF2_0),
1328 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0), 1327 PINMUX_IPSR_MSEL(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0),
1329 PINMUX_IPSR_DATA(IP10_2_0, ATADIR1_N), 1328 PINMUX_IPSR_DATA(IP10_2_0, ATADIR1_N),
1330 PINMUX_IPSR_DATA(IP10_5_3, VI0_G2), 1329 PINMUX_IPSR_DATA(IP10_5_3, VI0_G2),
1331 PINMUX_IPSR_DATA(IP10_5_3, VI2_HSYNC_N), 1330 PINMUX_IPSR_DATA(IP10_5_3, VI2_HSYNC_N),
1332 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, STP_ISD_0_C, SEL_SSP_2), 1331 PINMUX_IPSR_MSEL(IP10_5_3, STP_ISD_0_C, SEL_SSP_2),
1333 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCL3_B, SEL_IIC3_1), 1332 PINMUX_IPSR_MSEL(IP10_5_3, SCL3_B, SEL_IIC3_1),
1334 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, HSCK2, SEL_HSCIF2_0), 1333 PINMUX_IPSR_MSEL(IP10_5_3, HSCK2, SEL_HSCIF2_0),
1335 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0), 1334 PINMUX_IPSR_MSEL(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0),
1336 PINMUX_IPSR_DATA(IP10_5_3, ATARD1_N), 1335 PINMUX_IPSR_DATA(IP10_5_3, ATARD1_N),
1337 PINMUX_IPSR_DATA(IP10_8_6, VI0_G3), 1336 PINMUX_IPSR_DATA(IP10_8_6, VI0_G3),
1338 PINMUX_IPSR_DATA(IP10_8_6, VI2_VSYNC_N), 1337 PINMUX_IPSR_DATA(IP10_8_6, VI2_VSYNC_N),
1339 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2), 1338 PINMUX_IPSR_MSEL(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2),
1340 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SDA3_B, SEL_IIC3_1), 1339 PINMUX_IPSR_MSEL(IP10_8_6, SDA3_B, SEL_IIC3_1),
1341 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, HRX2, SEL_HSCIF2_0), 1340 PINMUX_IPSR_MSEL(IP10_8_6, HRX2, SEL_HSCIF2_0),
1342 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0), 1341 PINMUX_IPSR_MSEL(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0),
1343 PINMUX_IPSR_DATA(IP10_8_6, ATACS01_N), 1342 PINMUX_IPSR_DATA(IP10_8_6, ATACS01_N),
1344 PINMUX_IPSR_DATA(IP10_11_9, VI0_G4), 1343 PINMUX_IPSR_DATA(IP10_11_9, VI0_G4),
1345 PINMUX_IPSR_DATA(IP10_11_9, VI2_CLKENB), 1344 PINMUX_IPSR_DATA(IP10_11_9, VI2_CLKENB),
1346 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, STP_ISSYNC_0_C, SEL_SSP_2), 1345 PINMUX_IPSR_MSEL(IP10_11_9, STP_ISSYNC_0_C, SEL_SSP_2),
1347 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, HTX2, SEL_HSCIF2_0), 1346 PINMUX_IPSR_MSEL(IP10_11_9, HTX2, SEL_HSCIF2_0),
1348 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIFB2_TXD, SEL_SCIFB2_0), 1347 PINMUX_IPSR_MSEL(IP10_11_9, SCIFB2_TXD, SEL_SCIFB2_0),
1349 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIFB0_SCK_D, SEL_SCIFB_3), 1348 PINMUX_IPSR_MSEL(IP10_11_9, SCIFB0_SCK_D, SEL_SCIFB_3),
1350 PINMUX_IPSR_DATA(IP10_14_12, VI0_G5), 1349 PINMUX_IPSR_DATA(IP10_14_12, VI0_G5),
1351 PINMUX_IPSR_DATA(IP10_14_12, VI2_FIELD), 1350 PINMUX_IPSR_DATA(IP10_14_12, VI2_FIELD),
1352 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, STP_OPWM_0_C, SEL_SSP_2), 1351 PINMUX_IPSR_MSEL(IP10_14_12, STP_OPWM_0_C, SEL_SSP_2),
1353 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, FMCLK_D, SEL_FM_3), 1352 PINMUX_IPSR_MSEL(IP10_14_12, FMCLK_D, SEL_FM_3),
1354 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, CAN0_TX_E, SEL_CAN0_4), 1353 PINMUX_IPSR_MSEL(IP10_14_12, CAN0_TX_E, SEL_CAN0_4),
1355 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, HTX1_D, SEL_HSCIF1_3), 1354 PINMUX_IPSR_MSEL(IP10_14_12, HTX1_D, SEL_HSCIF1_3),
1356 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCIFB0_TXD_D, SEL_SCIFB_3), 1355 PINMUX_IPSR_MSEL(IP10_14_12, SCIFB0_TXD_D, SEL_SCIFB_3),
1357 PINMUX_IPSR_DATA(IP10_16_15, VI0_G6), 1356 PINMUX_IPSR_DATA(IP10_16_15, VI0_G6),
1358 PINMUX_IPSR_DATA(IP10_16_15, VI2_CLK), 1357 PINMUX_IPSR_DATA(IP10_16_15, VI2_CLK),
1359 PINMUX_IPSR_MODSEL_DATA(IP10_16_15, BPFCLK_D, SEL_FM_3), 1358 PINMUX_IPSR_MSEL(IP10_16_15, BPFCLK_D, SEL_FM_3),
1360 PINMUX_IPSR_DATA(IP10_18_17, VI0_G7), 1359 PINMUX_IPSR_DATA(IP10_18_17, VI0_G7),
1361 PINMUX_IPSR_DATA(IP10_18_17, VI2_DATA0), 1360 PINMUX_IPSR_DATA(IP10_18_17, VI2_DATA0),
1362 PINMUX_IPSR_MODSEL_DATA(IP10_18_17, FMIN_D, SEL_FM_3), 1361 PINMUX_IPSR_MSEL(IP10_18_17, FMIN_D, SEL_FM_3),
1363 PINMUX_IPSR_DATA(IP10_21_19, VI0_R0), 1362 PINMUX_IPSR_DATA(IP10_21_19, VI0_R0),
1364 PINMUX_IPSR_DATA(IP10_21_19, VI2_DATA1), 1363 PINMUX_IPSR_DATA(IP10_21_19, VI2_DATA1),
1365 PINMUX_IPSR_MODSEL_DATA(IP10_21_19, GLO_I0_B, SEL_GPS_1), 1364 PINMUX_IPSR_MSEL(IP10_21_19, GLO_I0_B, SEL_GPS_1),
1366 PINMUX_IPSR_MODSEL_DATA(IP10_21_19, TS_SDATA0_C, SEL_TSIF0_2), 1365 PINMUX_IPSR_MSEL(IP10_21_19, TS_SDATA0_C, SEL_TSIF0_2),
1367 PINMUX_IPSR_DATA(IP10_21_19, ATACS11_N), 1366 PINMUX_IPSR_DATA(IP10_21_19, ATACS11_N),
1368 PINMUX_IPSR_DATA(IP10_24_22, VI0_R1), 1367 PINMUX_IPSR_DATA(IP10_24_22, VI0_R1),
1369 PINMUX_IPSR_DATA(IP10_24_22, VI2_DATA2), 1368 PINMUX_IPSR_DATA(IP10_24_22, VI2_DATA2),
1370 PINMUX_IPSR_MODSEL_DATA(IP10_24_22, GLO_I1_B, SEL_GPS_1), 1369 PINMUX_IPSR_MSEL(IP10_24_22, GLO_I1_B, SEL_GPS_1),
1371 PINMUX_IPSR_MODSEL_DATA(IP10_24_22, TS_SCK0_C, SEL_TSIF0_2), 1370 PINMUX_IPSR_MSEL(IP10_24_22, TS_SCK0_C, SEL_TSIF0_2),
1372 PINMUX_IPSR_DATA(IP10_24_22, ATAG1_N), 1371 PINMUX_IPSR_DATA(IP10_24_22, ATAG1_N),
1373 PINMUX_IPSR_DATA(IP10_26_25, VI0_R2), 1372 PINMUX_IPSR_DATA(IP10_26_25, VI0_R2),
1374 PINMUX_IPSR_DATA(IP10_26_25, VI2_DATA3), 1373 PINMUX_IPSR_DATA(IP10_26_25, VI2_DATA3),
1375 PINMUX_IPSR_MODSEL_DATA(IP10_26_25, GLO_Q0_B, SEL_GPS_1), 1374 PINMUX_IPSR_MSEL(IP10_26_25, GLO_Q0_B, SEL_GPS_1),
1376 PINMUX_IPSR_MODSEL_DATA(IP10_26_25, TS_SDEN0_C, SEL_TSIF0_2), 1375 PINMUX_IPSR_MSEL(IP10_26_25, TS_SDEN0_C, SEL_TSIF0_2),
1377 PINMUX_IPSR_DATA(IP10_28_27, VI0_R3), 1376 PINMUX_IPSR_DATA(IP10_28_27, VI0_R3),
1378 PINMUX_IPSR_DATA(IP10_28_27, VI2_DATA4), 1377 PINMUX_IPSR_DATA(IP10_28_27, VI2_DATA4),
1379 PINMUX_IPSR_MODSEL_DATA(IP10_28_27, GLO_Q1_B, SEL_GPS_1), 1378 PINMUX_IPSR_MSEL(IP10_28_27, GLO_Q1_B, SEL_GPS_1),
1380 PINMUX_IPSR_MODSEL_DATA(IP10_28_27, TS_SPSYNC0_C, SEL_TSIF0_2), 1379 PINMUX_IPSR_MSEL(IP10_28_27, TS_SPSYNC0_C, SEL_TSIF0_2),
1381 PINMUX_IPSR_DATA(IP10_31_29, VI0_R4), 1380 PINMUX_IPSR_DATA(IP10_31_29, VI0_R4),
1382 PINMUX_IPSR_DATA(IP10_31_29, VI2_DATA5), 1381 PINMUX_IPSR_DATA(IP10_31_29, VI2_DATA5),
1383 PINMUX_IPSR_MODSEL_DATA(IP10_31_29, GLO_SCLK_B, SEL_GPS_1), 1382 PINMUX_IPSR_MSEL(IP10_31_29, GLO_SCLK_B, SEL_GPS_1),
1384 PINMUX_IPSR_MODSEL_DATA(IP10_31_29, TX0_C, SEL_SCIF0_2), 1383 PINMUX_IPSR_MSEL(IP10_31_29, TX0_C, SEL_SCIF0_2),
1385 PINMUX_IPSR_MODSEL_DATA(IP10_31_29, SCL1_D, SEL_IIC1_3), 1384 PINMUX_IPSR_MSEL(IP10_31_29, SCL1_D, SEL_IIC1_3),
1386 1385
1387 /* IPSR11 */ 1386 /* IPSR11 */
1388 PINMUX_IPSR_DATA(IP11_2_0, VI0_R5), 1387 PINMUX_IPSR_DATA(IP11_2_0, VI0_R5),
1389 PINMUX_IPSR_DATA(IP11_2_0, VI2_DATA6), 1388 PINMUX_IPSR_DATA(IP11_2_0, VI2_DATA6),
1390 PINMUX_IPSR_MODSEL_DATA(IP11_2_0, GLO_SDATA_B, SEL_GPS_1), 1389 PINMUX_IPSR_MSEL(IP11_2_0, GLO_SDATA_B, SEL_GPS_1),
1391 PINMUX_IPSR_MODSEL_DATA(IP11_2_0, RX0_C, SEL_SCIF0_2), 1390 PINMUX_IPSR_MSEL(IP11_2_0, RX0_C, SEL_SCIF0_2),
1392 PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SDA1_D, SEL_IIC1_3), 1391 PINMUX_IPSR_MSEL(IP11_2_0, SDA1_D, SEL_IIC1_3),
1393 PINMUX_IPSR_DATA(IP11_5_3, VI0_R6), 1392 PINMUX_IPSR_DATA(IP11_5_3, VI0_R6),
1394 PINMUX_IPSR_DATA(IP11_5_3, VI2_DATA7), 1393 PINMUX_IPSR_DATA(IP11_5_3, VI2_DATA7),
1395 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, GLO_SS_B, SEL_GPS_1), 1394 PINMUX_IPSR_MSEL(IP11_5_3, GLO_SS_B, SEL_GPS_1),
1396 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, TX1_C, SEL_SCIF1_2), 1395 PINMUX_IPSR_MSEL(IP11_5_3, TX1_C, SEL_SCIF1_2),
1397 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SCL4_B, SEL_IIC4_1), 1396 PINMUX_IPSR_MSEL(IP11_5_3, SCL4_B, SEL_IIC4_1),
1398 PINMUX_IPSR_DATA(IP11_8_6, VI0_R7), 1397 PINMUX_IPSR_DATA(IP11_8_6, VI0_R7),
1399 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, GLO_RFON_B, SEL_GPS_1), 1398 PINMUX_IPSR_MSEL(IP11_8_6, GLO_RFON_B, SEL_GPS_1),
1400 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, RX1_C, SEL_SCIF1_2), 1399 PINMUX_IPSR_MSEL(IP11_8_6, RX1_C, SEL_SCIF1_2),
1401 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, CAN0_RX_E, SEL_CAN0_4), 1400 PINMUX_IPSR_MSEL(IP11_8_6, CAN0_RX_E, SEL_CAN0_4),
1402 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SDA4_B, SEL_IIC4_1), 1401 PINMUX_IPSR_MSEL(IP11_8_6, SDA4_B, SEL_IIC4_1),
1403 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, HRX1_D, SEL_HSCIF1_3), 1402 PINMUX_IPSR_MSEL(IP11_8_6, HRX1_D, SEL_HSCIF1_3),
1404 PINMUX_IPSR_MODSEL_DATA(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3), 1403 PINMUX_IPSR_MSEL(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3),
1405 PINMUX_IPSR_MODSEL_DATA(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0), 1404 PINMUX_IPSR_MSEL(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0),
1406 PINMUX_IPSR_DATA(IP11_11_9, AVB_RXD0), 1405 PINMUX_IPSR_DATA(IP11_11_9, AVB_RXD0),
1407 PINMUX_IPSR_MODSEL_DATA(IP11_11_9, TS_SDATA0_B, SEL_TSIF0_1), 1406 PINMUX_IPSR_MSEL(IP11_11_9, TS_SDATA0_B, SEL_TSIF0_1),
1408 PINMUX_IPSR_MODSEL_DATA(IP11_11_9, TX4_B, SEL_SCIF4_1), 1407 PINMUX_IPSR_MSEL(IP11_11_9, TX4_B, SEL_SCIF4_1),
1409 PINMUX_IPSR_MODSEL_DATA(IP11_11_9, SCIFA4_TXD_B, SEL_SCIFA4_1), 1408 PINMUX_IPSR_MSEL(IP11_11_9, SCIFA4_TXD_B, SEL_SCIFA4_1),
1410 PINMUX_IPSR_MODSEL_DATA(IP11_14_12, VI1_VSYNC_N, SEL_VI1_0), 1409 PINMUX_IPSR_MSEL(IP11_14_12, VI1_VSYNC_N, SEL_VI1_0),
1411 PINMUX_IPSR_DATA(IP11_14_12, AVB_RXD1), 1410 PINMUX_IPSR_DATA(IP11_14_12, AVB_RXD1),
1412 PINMUX_IPSR_MODSEL_DATA(IP11_14_12, TS_SCK0_B, SEL_TSIF0_1), 1411 PINMUX_IPSR_MSEL(IP11_14_12, TS_SCK0_B, SEL_TSIF0_1),
1413 PINMUX_IPSR_MODSEL_DATA(IP11_14_12, RX4_B, SEL_SCIF4_1), 1412 PINMUX_IPSR_MSEL(IP11_14_12, RX4_B, SEL_SCIF4_1),
1414 PINMUX_IPSR_MODSEL_DATA(IP11_14_12, SCIFA4_RXD_B, SEL_SCIFA4_1), 1413 PINMUX_IPSR_MSEL(IP11_14_12, SCIFA4_RXD_B, SEL_SCIFA4_1),
1415 PINMUX_IPSR_MODSEL_DATA(IP11_16_15, VI1_CLKENB, SEL_VI1_0), 1414 PINMUX_IPSR_MSEL(IP11_16_15, VI1_CLKENB, SEL_VI1_0),
1416 PINMUX_IPSR_DATA(IP11_16_15, AVB_RXD2), 1415 PINMUX_IPSR_DATA(IP11_16_15, AVB_RXD2),
1417 PINMUX_IPSR_MODSEL_DATA(IP11_16_15, TS_SDEN0_B, SEL_TSIF0_1), 1416 PINMUX_IPSR_MSEL(IP11_16_15, TS_SDEN0_B, SEL_TSIF0_1),
1418 PINMUX_IPSR_MODSEL_DATA(IP11_18_17, VI1_FIELD, SEL_VI1_0), 1417 PINMUX_IPSR_MSEL(IP11_18_17, VI1_FIELD, SEL_VI1_0),
1419 PINMUX_IPSR_DATA(IP11_18_17, AVB_RXD3), 1418 PINMUX_IPSR_DATA(IP11_18_17, AVB_RXD3),
1420 PINMUX_IPSR_MODSEL_DATA(IP11_18_17, TS_SPSYNC0_B, SEL_TSIF0_1), 1419 PINMUX_IPSR_MSEL(IP11_18_17, TS_SPSYNC0_B, SEL_TSIF0_1),
1421 PINMUX_IPSR_MODSEL_DATA(IP11_19, VI1_CLK, SEL_VI1_0), 1420 PINMUX_IPSR_MSEL(IP11_19, VI1_CLK, SEL_VI1_0),
1422 PINMUX_IPSR_DATA(IP11_19, AVB_RXD4), 1421 PINMUX_IPSR_DATA(IP11_19, AVB_RXD4),
1423 PINMUX_IPSR_MODSEL_DATA(IP11_20, VI1_DATA0, SEL_VI1_0), 1422 PINMUX_IPSR_MSEL(IP11_20, VI1_DATA0, SEL_VI1_0),
1424 PINMUX_IPSR_DATA(IP11_20, AVB_RXD5), 1423 PINMUX_IPSR_DATA(IP11_20, AVB_RXD5),
1425 PINMUX_IPSR_MODSEL_DATA(IP11_21, VI1_DATA1, SEL_VI1_0), 1424 PINMUX_IPSR_MSEL(IP11_21, VI1_DATA1, SEL_VI1_0),
1426 PINMUX_IPSR_DATA(IP11_21, AVB_RXD6), 1425 PINMUX_IPSR_DATA(IP11_21, AVB_RXD6),
1427 PINMUX_IPSR_MODSEL_DATA(IP11_22, VI1_DATA2, SEL_VI1_0), 1426 PINMUX_IPSR_MSEL(IP11_22, VI1_DATA2, SEL_VI1_0),
1428 PINMUX_IPSR_DATA(IP11_22, AVB_RXD7), 1427 PINMUX_IPSR_DATA(IP11_22, AVB_RXD7),
1429 PINMUX_IPSR_MODSEL_DATA(IP11_23, VI1_DATA3, SEL_VI1_0), 1428 PINMUX_IPSR_MSEL(IP11_23, VI1_DATA3, SEL_VI1_0),
1430 PINMUX_IPSR_DATA(IP11_23, AVB_RX_ER), 1429 PINMUX_IPSR_DATA(IP11_23, AVB_RX_ER),
1431 PINMUX_IPSR_MODSEL_DATA(IP11_24, VI1_DATA4, SEL_VI1_0), 1430 PINMUX_IPSR_MSEL(IP11_24, VI1_DATA4, SEL_VI1_0),
1432 PINMUX_IPSR_DATA(IP11_24, AVB_MDIO), 1431 PINMUX_IPSR_DATA(IP11_24, AVB_MDIO),
1433 PINMUX_IPSR_MODSEL_DATA(IP11_25, VI1_DATA5, SEL_VI1_0), 1432 PINMUX_IPSR_MSEL(IP11_25, VI1_DATA5, SEL_VI1_0),
1434 PINMUX_IPSR_DATA(IP11_25, AVB_RX_DV), 1433 PINMUX_IPSR_DATA(IP11_25, AVB_RX_DV),
1435 PINMUX_IPSR_MODSEL_DATA(IP11_26, VI1_DATA6, SEL_VI1_0), 1434 PINMUX_IPSR_MSEL(IP11_26, VI1_DATA6, SEL_VI1_0),
1436 PINMUX_IPSR_DATA(IP11_26, AVB_MAGIC), 1435 PINMUX_IPSR_DATA(IP11_26, AVB_MAGIC),
1437 PINMUX_IPSR_MODSEL_DATA(IP11_27, VI1_DATA7, SEL_VI1_0), 1436 PINMUX_IPSR_MSEL(IP11_27, VI1_DATA7, SEL_VI1_0),
1438 PINMUX_IPSR_DATA(IP11_27, AVB_MDC), 1437 PINMUX_IPSR_DATA(IP11_27, AVB_MDC),
1439 PINMUX_IPSR_DATA(IP11_29_28, ETH_MDIO), 1438 PINMUX_IPSR_DATA(IP11_29_28, ETH_MDIO),
1440 PINMUX_IPSR_DATA(IP11_29_28, AVB_RX_CLK), 1439 PINMUX_IPSR_DATA(IP11_29_28, AVB_RX_CLK),
1441 PINMUX_IPSR_MODSEL_DATA(IP11_29_28, SCL2_C, SEL_IIC2_2), 1440 PINMUX_IPSR_MSEL(IP11_29_28, SCL2_C, SEL_IIC2_2),
1442 PINMUX_IPSR_DATA(IP11_31_30, ETH_CRS_DV), 1441 PINMUX_IPSR_DATA(IP11_31_30, ETH_CRS_DV),
1443 PINMUX_IPSR_DATA(IP11_31_30, AVB_LINK), 1442 PINMUX_IPSR_DATA(IP11_31_30, AVB_LINK),
1444 PINMUX_IPSR_MODSEL_DATA(IP11_31_30, SDA2_C, SEL_IIC2_2), 1443 PINMUX_IPSR_MSEL(IP11_31_30, SDA2_C, SEL_IIC2_2),
1445 1444
1446 /* IPSR12 */ 1445 /* IPSR12 */
1447 PINMUX_IPSR_DATA(IP12_1_0, ETH_RX_ER), 1446 PINMUX_IPSR_DATA(IP12_1_0, ETH_RX_ER),
1448 PINMUX_IPSR_DATA(IP12_1_0, AVB_CRS), 1447 PINMUX_IPSR_DATA(IP12_1_0, AVB_CRS),
1449 PINMUX_IPSR_MODSEL_DATA(IP12_1_0, SCL3, SEL_IIC3_0), 1448 PINMUX_IPSR_MSEL(IP12_1_0, SCL3, SEL_IIC3_0),
1450 PINMUX_IPSR_MODSEL_DATA(IP12_1_0, SCL7, SEL_IIC7_0), 1449 PINMUX_IPSR_MSEL(IP12_1_0, SCL7, SEL_IIC7_0),
1451 PINMUX_IPSR_DATA(IP12_3_2, ETH_RXD0), 1450 PINMUX_IPSR_DATA(IP12_3_2, ETH_RXD0),
1452 PINMUX_IPSR_DATA(IP12_3_2, AVB_PHY_INT), 1451 PINMUX_IPSR_DATA(IP12_3_2, AVB_PHY_INT),
1453 PINMUX_IPSR_MODSEL_DATA(IP12_3_2, SDA3, SEL_IIC3_0), 1452 PINMUX_IPSR_MSEL(IP12_3_2, SDA3, SEL_IIC3_0),
1454 PINMUX_IPSR_MODSEL_DATA(IP12_3_2, SDA7, SEL_IIC7_0), 1453 PINMUX_IPSR_MSEL(IP12_3_2, SDA7, SEL_IIC7_0),
1455 PINMUX_IPSR_DATA(IP12_6_4, ETH_RXD1), 1454 PINMUX_IPSR_DATA(IP12_6_4, ETH_RXD1),
1456 PINMUX_IPSR_DATA(IP12_6_4, AVB_GTXREFCLK), 1455 PINMUX_IPSR_DATA(IP12_6_4, AVB_GTXREFCLK),
1457 PINMUX_IPSR_MODSEL_DATA(IP12_6_4, CAN0_TX_C, SEL_CAN0_2), 1456 PINMUX_IPSR_MSEL(IP12_6_4, CAN0_TX_C, SEL_CAN0_2),
1458 PINMUX_IPSR_MODSEL_DATA(IP12_6_4, SCL2_D, SEL_IIC2_3), 1457 PINMUX_IPSR_MSEL(IP12_6_4, SCL2_D, SEL_IIC2_3),
1459 PINMUX_IPSR_MODSEL_DATA(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4), 1458 PINMUX_IPSR_MSEL(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4),
1460 PINMUX_IPSR_DATA(IP12_9_7, ETH_LINK), 1459 PINMUX_IPSR_DATA(IP12_9_7, ETH_LINK),
1461 PINMUX_IPSR_DATA(IP12_9_7, AVB_TXD0), 1460 PINMUX_IPSR_DATA(IP12_9_7, AVB_TXD0),
1462 PINMUX_IPSR_MODSEL_DATA(IP12_9_7, CAN0_RX_C, SEL_CAN0_2), 1461 PINMUX_IPSR_MSEL(IP12_9_7, CAN0_RX_C, SEL_CAN0_2),
1463 PINMUX_IPSR_MODSEL_DATA(IP12_9_7, SDA2_D, SEL_IIC2_3), 1462 PINMUX_IPSR_MSEL(IP12_9_7, SDA2_D, SEL_IIC2_3),
1464 PINMUX_IPSR_MODSEL_DATA(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4), 1463 PINMUX_IPSR_MSEL(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4),
1465 PINMUX_IPSR_DATA(IP12_12_10, ETH_REFCLK), 1464 PINMUX_IPSR_DATA(IP12_12_10, ETH_REFCLK),
1466 PINMUX_IPSR_DATA(IP12_12_10, AVB_TXD1), 1465 PINMUX_IPSR_DATA(IP12_12_10, AVB_TXD1),
1467 PINMUX_IPSR_MODSEL_DATA(IP12_12_10, SCIFA3_RXD_B, SEL_SCIFA3_1), 1466 PINMUX_IPSR_MSEL(IP12_12_10, SCIFA3_RXD_B, SEL_SCIFA3_1),
1468 PINMUX_IPSR_MODSEL_DATA(IP12_12_10, CAN1_RX_C, SEL_CAN1_2), 1467 PINMUX_IPSR_MSEL(IP12_12_10, CAN1_RX_C, SEL_CAN1_2),
1469 PINMUX_IPSR_MODSEL_DATA(IP12_12_10, MSIOF1_SYNC_E, SEL_SOF1_4), 1468 PINMUX_IPSR_MSEL(IP12_12_10, MSIOF1_SYNC_E, SEL_SOF1_4),
1470 PINMUX_IPSR_DATA(IP12_15_13, ETH_TXD1), 1469 PINMUX_IPSR_DATA(IP12_15_13, ETH_TXD1),
1471 PINMUX_IPSR_DATA(IP12_15_13, AVB_TXD2), 1470 PINMUX_IPSR_DATA(IP12_15_13, AVB_TXD2),
1472 PINMUX_IPSR_MODSEL_DATA(IP12_15_13, SCIFA3_TXD_B, SEL_SCIFA3_1), 1471 PINMUX_IPSR_MSEL(IP12_15_13, SCIFA3_TXD_B, SEL_SCIFA3_1),
1473 PINMUX_IPSR_MODSEL_DATA(IP12_15_13, CAN1_TX_C, SEL_CAN1_2), 1472 PINMUX_IPSR_MSEL(IP12_15_13, CAN1_TX_C, SEL_CAN1_2),
1474 PINMUX_IPSR_MODSEL_DATA(IP12_15_13, MSIOF1_TXD_E, SEL_SOF1_4), 1473 PINMUX_IPSR_MSEL(IP12_15_13, MSIOF1_TXD_E, SEL_SOF1_4),
1475 PINMUX_IPSR_DATA(IP12_17_16, ETH_TX_EN), 1474 PINMUX_IPSR_DATA(IP12_17_16, ETH_TX_EN),
1476 PINMUX_IPSR_DATA(IP12_17_16, AVB_TXD3), 1475 PINMUX_IPSR_DATA(IP12_17_16, AVB_TXD3),
1477 PINMUX_IPSR_MODSEL_DATA(IP12_17_16, TCLK1_B, SEL_TMU1_0), 1476 PINMUX_IPSR_MSEL(IP12_17_16, TCLK1_B, SEL_TMU1_0),
1478 PINMUX_IPSR_MODSEL_DATA(IP12_17_16, CAN_CLK_B, SEL_CANCLK_1), 1477 PINMUX_IPSR_MSEL(IP12_17_16, CAN_CLK_B, SEL_CANCLK_1),
1479 PINMUX_IPSR_DATA(IP12_19_18, ETH_MAGIC), 1478 PINMUX_IPSR_DATA(IP12_19_18, ETH_MAGIC),
1480 PINMUX_IPSR_DATA(IP12_19_18, AVB_TXD4), 1479 PINMUX_IPSR_DATA(IP12_19_18, AVB_TXD4),
1481 PINMUX_IPSR_MODSEL_DATA(IP12_19_18, IETX_C, SEL_IEB_2), 1480 PINMUX_IPSR_MSEL(IP12_19_18, IETX_C, SEL_IEB_2),
1482 PINMUX_IPSR_DATA(IP12_21_20, ETH_TXD0), 1481 PINMUX_IPSR_DATA(IP12_21_20, ETH_TXD0),
1483 PINMUX_IPSR_DATA(IP12_21_20, AVB_TXD5), 1482 PINMUX_IPSR_DATA(IP12_21_20, AVB_TXD5),
1484 PINMUX_IPSR_MODSEL_DATA(IP12_21_20, IECLK_C, SEL_IEB_2), 1483 PINMUX_IPSR_MSEL(IP12_21_20, IECLK_C, SEL_IEB_2),
1485 PINMUX_IPSR_DATA(IP12_23_22, ETH_MDC), 1484 PINMUX_IPSR_DATA(IP12_23_22, ETH_MDC),
1486 PINMUX_IPSR_DATA(IP12_23_22, AVB_TXD6), 1485 PINMUX_IPSR_DATA(IP12_23_22, AVB_TXD6),
1487 PINMUX_IPSR_MODSEL_DATA(IP12_23_22, IERX_C, SEL_IEB_2), 1486 PINMUX_IPSR_MSEL(IP12_23_22, IERX_C, SEL_IEB_2),
1488 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, STP_IVCXO27_0, SEL_SSP_0), 1487 PINMUX_IPSR_MSEL(IP12_26_24, STP_IVCXO27_0, SEL_SSP_0),
1489 PINMUX_IPSR_DATA(IP12_26_24, AVB_TXD7), 1488 PINMUX_IPSR_DATA(IP12_26_24, AVB_TXD7),
1490 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, SCIFB2_TXD_D, SEL_SCIFB2_3), 1489 PINMUX_IPSR_MSEL(IP12_26_24, SCIFB2_TXD_D, SEL_SCIFB2_3),
1491 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, ADIDATA_B, SEL_RAD_1), 1490 PINMUX_IPSR_MSEL(IP12_26_24, ADIDATA_B, SEL_RAD_1),
1492 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, MSIOF0_SYNC_C, SEL_SOF0_2), 1491 PINMUX_IPSR_MSEL(IP12_26_24, MSIOF0_SYNC_C, SEL_SOF0_2),
1493 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, STP_ISCLK_0, SEL_SSP_0), 1492 PINMUX_IPSR_MSEL(IP12_29_27, STP_ISCLK_0, SEL_SSP_0),
1494 PINMUX_IPSR_DATA(IP12_29_27, AVB_TX_EN), 1493 PINMUX_IPSR_DATA(IP12_29_27, AVB_TX_EN),
1495 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, SCIFB2_RXD_D, SEL_SCIFB2_3), 1494 PINMUX_IPSR_MSEL(IP12_29_27, SCIFB2_RXD_D, SEL_SCIFB2_3),
1496 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, ADICS_SAMP_B, SEL_RAD_1), 1495 PINMUX_IPSR_MSEL(IP12_29_27, ADICS_SAMP_B, SEL_RAD_1),
1497 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, MSIOF0_SCK_C, SEL_SOF0_2), 1496 PINMUX_IPSR_MSEL(IP12_29_27, MSIOF0_SCK_C, SEL_SOF0_2),
1498 1497
1499 /* IPSR13 */ 1498 /* IPSR13 */
1500 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, STP_ISD_0, SEL_SSP_0), 1499 PINMUX_IPSR_MSEL(IP13_2_0, STP_ISD_0, SEL_SSP_0),
1501 PINMUX_IPSR_DATA(IP13_2_0, AVB_TX_ER), 1500 PINMUX_IPSR_DATA(IP13_2_0, AVB_TX_ER),
1502 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCIFB2_SCK_C, SEL_SCIFB2_2), 1501 PINMUX_IPSR_MSEL(IP13_2_0, SCIFB2_SCK_C, SEL_SCIFB2_2),
1503 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, ADICLK_B, SEL_RAD_1), 1502 PINMUX_IPSR_MSEL(IP13_2_0, ADICLK_B, SEL_RAD_1),
1504 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, MSIOF0_SS1_C, SEL_SOF0_2), 1503 PINMUX_IPSR_MSEL(IP13_2_0, MSIOF0_SS1_C, SEL_SOF0_2),
1505 PINMUX_IPSR_MODSEL_DATA(IP13_4_3, STP_ISEN_0, SEL_SSP_0), 1504 PINMUX_IPSR_MSEL(IP13_4_3, STP_ISEN_0, SEL_SSP_0),
1506 PINMUX_IPSR_DATA(IP13_4_3, AVB_TX_CLK), 1505 PINMUX_IPSR_DATA(IP13_4_3, AVB_TX_CLK),
1507 PINMUX_IPSR_MODSEL_DATA(IP13_4_3, ADICHS0_B, SEL_RAD_1), 1506 PINMUX_IPSR_MSEL(IP13_4_3, ADICHS0_B, SEL_RAD_1),
1508 PINMUX_IPSR_MODSEL_DATA(IP13_4_3, MSIOF0_SS2_C, SEL_SOF0_2), 1507 PINMUX_IPSR_MSEL(IP13_4_3, MSIOF0_SS2_C, SEL_SOF0_2),
1509 PINMUX_IPSR_MODSEL_DATA(IP13_6_5, STP_ISSYNC_0, SEL_SSP_0), 1508 PINMUX_IPSR_MSEL(IP13_6_5, STP_ISSYNC_0, SEL_SSP_0),
1510 PINMUX_IPSR_DATA(IP13_6_5, AVB_COL), 1509 PINMUX_IPSR_DATA(IP13_6_5, AVB_COL),
1511 PINMUX_IPSR_MODSEL_DATA(IP13_6_5, ADICHS1_B, SEL_RAD_1), 1510 PINMUX_IPSR_MSEL(IP13_6_5, ADICHS1_B, SEL_RAD_1),
1512 PINMUX_IPSR_MODSEL_DATA(IP13_6_5, MSIOF0_RXD_C, SEL_SOF0_2), 1511 PINMUX_IPSR_MSEL(IP13_6_5, MSIOF0_RXD_C, SEL_SOF0_2),
1513 PINMUX_IPSR_MODSEL_DATA(IP13_9_7, STP_OPWM_0, SEL_SSP_0), 1512 PINMUX_IPSR_MSEL(IP13_9_7, STP_OPWM_0, SEL_SSP_0),
1514 PINMUX_IPSR_DATA(IP13_9_7, AVB_GTX_CLK), 1513 PINMUX_IPSR_DATA(IP13_9_7, AVB_GTX_CLK),
1515 PINMUX_IPSR_DATA(IP13_9_7, PWM0_B), 1514 PINMUX_IPSR_DATA(IP13_9_7, PWM0_B),
1516 PINMUX_IPSR_MODSEL_DATA(IP13_9_7, ADICHS2_B, SEL_RAD_1), 1515 PINMUX_IPSR_MSEL(IP13_9_7, ADICHS2_B, SEL_RAD_1),
1517 PINMUX_IPSR_MODSEL_DATA(IP13_9_7, MSIOF0_TXD_C, SEL_SOF0_2), 1516 PINMUX_IPSR_MSEL(IP13_9_7, MSIOF0_TXD_C, SEL_SOF0_2),
1518 PINMUX_IPSR_DATA(IP13_10, SD0_CLK), 1517 PINMUX_IPSR_DATA(IP13_10, SD0_CLK),
1519 PINMUX_IPSR_MODSEL_DATA(IP13_10, SPCLK_B, SEL_QSP_1), 1518 PINMUX_IPSR_MSEL(IP13_10, SPCLK_B, SEL_QSP_1),
1520 PINMUX_IPSR_DATA(IP13_11, SD0_CMD), 1519 PINMUX_IPSR_DATA(IP13_11, SD0_CMD),
1521 PINMUX_IPSR_MODSEL_DATA(IP13_11, MOSI_IO0_B, SEL_QSP_1), 1520 PINMUX_IPSR_MSEL(IP13_11, MOSI_IO0_B, SEL_QSP_1),
1522 PINMUX_IPSR_DATA(IP13_12, SD0_DATA0), 1521 PINMUX_IPSR_DATA(IP13_12, SD0_DATA0),
1523 PINMUX_IPSR_MODSEL_DATA(IP13_12, MISO_IO1_B, SEL_QSP_1), 1522 PINMUX_IPSR_MSEL(IP13_12, MISO_IO1_B, SEL_QSP_1),
1524 PINMUX_IPSR_DATA(IP13_13, SD0_DATA1), 1523 PINMUX_IPSR_DATA(IP13_13, SD0_DATA1),
1525 PINMUX_IPSR_MODSEL_DATA(IP13_13, IO2_B, SEL_QSP_1), 1524 PINMUX_IPSR_MSEL(IP13_13, IO2_B, SEL_QSP_1),
1526 PINMUX_IPSR_DATA(IP13_14, SD0_DATA2), 1525 PINMUX_IPSR_DATA(IP13_14, SD0_DATA2),
1527 PINMUX_IPSR_MODSEL_DATA(IP13_14, IO3_B, SEL_QSP_1), 1526 PINMUX_IPSR_MSEL(IP13_14, IO3_B, SEL_QSP_1),
1528 PINMUX_IPSR_DATA(IP13_15, SD0_DATA3), 1527 PINMUX_IPSR_DATA(IP13_15, SD0_DATA3),
1529 PINMUX_IPSR_MODSEL_DATA(IP13_15, SSL_B, SEL_QSP_1), 1528 PINMUX_IPSR_MSEL(IP13_15, SSL_B, SEL_QSP_1),
1530 PINMUX_IPSR_DATA(IP13_18_16, SD0_CD), 1529 PINMUX_IPSR_DATA(IP13_18_16, SD0_CD),
1531 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, MMC_D6_B, SEL_MMC_1), 1530 PINMUX_IPSR_MSEL(IP13_18_16, MMC_D6_B, SEL_MMC_1),
1532 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SIM0_RST_B, SEL_SIM_1), 1531 PINMUX_IPSR_MSEL(IP13_18_16, SIM0_RST_B, SEL_SIM_1),
1533 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, CAN0_RX_F, SEL_CAN0_5), 1532 PINMUX_IPSR_MSEL(IP13_18_16, CAN0_RX_F, SEL_CAN0_5),
1534 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, SCIFA5_TXD_B, SEL_SCIFA5_1), 1533 PINMUX_IPSR_MSEL(IP13_18_16, SCIFA5_TXD_B, SEL_SCIFA5_1),
1535 PINMUX_IPSR_MODSEL_DATA(IP13_18_16, TX3_C, SEL_SCIF3_2), 1534 PINMUX_IPSR_MSEL(IP13_18_16, TX3_C, SEL_SCIF3_2),
1536 PINMUX_IPSR_DATA(IP13_21_19, SD0_WP), 1535 PINMUX_IPSR_DATA(IP13_21_19, SD0_WP),
1537 PINMUX_IPSR_MODSEL_DATA(IP13_21_19, MMC_D7_B, SEL_MMC_1), 1536 PINMUX_IPSR_MSEL(IP13_21_19, MMC_D7_B, SEL_MMC_1),
1538 PINMUX_IPSR_MODSEL_DATA(IP13_21_19, SIM0_D_B, SEL_SIM_1), 1537 PINMUX_IPSR_MSEL(IP13_21_19, SIM0_D_B, SEL_SIM_1),
1539 PINMUX_IPSR_MODSEL_DATA(IP13_21_19, CAN0_TX_F, SEL_CAN0_5), 1538 PINMUX_IPSR_MSEL(IP13_21_19, CAN0_TX_F, SEL_CAN0_5),
1540 PINMUX_IPSR_MODSEL_DATA(IP13_21_19, SCIFA5_RXD_B, SEL_SCIFA5_1), 1539 PINMUX_IPSR_MSEL(IP13_21_19, SCIFA5_RXD_B, SEL_SCIFA5_1),
1541 PINMUX_IPSR_MODSEL_DATA(IP13_21_19, RX3_C, SEL_SCIF3_2), 1540 PINMUX_IPSR_MSEL(IP13_21_19, RX3_C, SEL_SCIF3_2),
1542 PINMUX_IPSR_DATA(IP13_22, SD1_CMD), 1541 PINMUX_IPSR_DATA(IP13_22, SD1_CMD),
1543 PINMUX_IPSR_MODSEL_DATA(IP13_22, REMOCON_B, SEL_RCN_1), 1542 PINMUX_IPSR_MSEL(IP13_22, REMOCON_B, SEL_RCN_1),
1544 PINMUX_IPSR_DATA(IP13_24_23, SD1_DATA0), 1543 PINMUX_IPSR_DATA(IP13_24_23, SD1_DATA0),
1545 PINMUX_IPSR_MODSEL_DATA(IP13_24_23, SPEEDIN_B, SEL_RSP_1), 1544 PINMUX_IPSR_MSEL(IP13_24_23, SPEEDIN_B, SEL_RSP_1),
1546 PINMUX_IPSR_DATA(IP13_25, SD1_DATA1), 1545 PINMUX_IPSR_DATA(IP13_25, SD1_DATA1),
1547 PINMUX_IPSR_MODSEL_DATA(IP13_25, IETX_B, SEL_IEB_1), 1546 PINMUX_IPSR_MSEL(IP13_25, IETX_B, SEL_IEB_1),
1548 PINMUX_IPSR_DATA(IP13_26, SD1_DATA2), 1547 PINMUX_IPSR_DATA(IP13_26, SD1_DATA2),
1549 PINMUX_IPSR_MODSEL_DATA(IP13_26, IECLK_B, SEL_IEB_1), 1548 PINMUX_IPSR_MSEL(IP13_26, IECLK_B, SEL_IEB_1),
1550 PINMUX_IPSR_DATA(IP13_27, SD1_DATA3), 1549 PINMUX_IPSR_DATA(IP13_27, SD1_DATA3),
1551 PINMUX_IPSR_MODSEL_DATA(IP13_27, IERX_B, SEL_IEB_1), 1550 PINMUX_IPSR_MSEL(IP13_27, IERX_B, SEL_IEB_1),
1552 PINMUX_IPSR_DATA(IP13_30_28, SD1_CD), 1551 PINMUX_IPSR_DATA(IP13_30_28, SD1_CD),
1553 PINMUX_IPSR_DATA(IP13_30_28, PWM0), 1552 PINMUX_IPSR_DATA(IP13_30_28, PWM0),
1554 PINMUX_IPSR_DATA(IP13_30_28, TPU_TO0), 1553 PINMUX_IPSR_DATA(IP13_30_28, TPU_TO0),
1555 PINMUX_IPSR_MODSEL_DATA(IP13_30_28, SCL1_C, SEL_IIC1_2), 1554 PINMUX_IPSR_MSEL(IP13_30_28, SCL1_C, SEL_IIC1_2),
1556 1555
1557 /* IPSR14 */ 1556 /* IPSR14 */
1558 PINMUX_IPSR_DATA(IP14_1_0, SD1_WP), 1557 PINMUX_IPSR_DATA(IP14_1_0, SD1_WP),
1559 PINMUX_IPSR_DATA(IP14_1_0, PWM1_B), 1558 PINMUX_IPSR_DATA(IP14_1_0, PWM1_B),
1560 PINMUX_IPSR_MODSEL_DATA(IP14_1_0, SDA1_C, SEL_IIC1_2), 1559 PINMUX_IPSR_MSEL(IP14_1_0, SDA1_C, SEL_IIC1_2),
1561 PINMUX_IPSR_DATA(IP14_2, SD2_CLK), 1560 PINMUX_IPSR_DATA(IP14_2, SD2_CLK),
1562 PINMUX_IPSR_DATA(IP14_2, MMC_CLK), 1561 PINMUX_IPSR_DATA(IP14_2, MMC_CLK),
1563 PINMUX_IPSR_DATA(IP14_3, SD2_CMD), 1562 PINMUX_IPSR_DATA(IP14_3, SD2_CMD),
@@ -1572,123 +1571,123 @@ static const u16 pinmux_data[] = {
1572 PINMUX_IPSR_DATA(IP14_7, MMC_D3), 1571 PINMUX_IPSR_DATA(IP14_7, MMC_D3),
1573 PINMUX_IPSR_DATA(IP14_10_8, SD2_CD), 1572 PINMUX_IPSR_DATA(IP14_10_8, SD2_CD),
1574 PINMUX_IPSR_DATA(IP14_10_8, MMC_D4), 1573 PINMUX_IPSR_DATA(IP14_10_8, MMC_D4),
1575 PINMUX_IPSR_MODSEL_DATA(IP14_10_8, SCL8_C, SEL_IIC8_2), 1574 PINMUX_IPSR_MSEL(IP14_10_8, SCL8_C, SEL_IIC8_2),
1576 PINMUX_IPSR_MODSEL_DATA(IP14_10_8, TX5_B, SEL_SCIF5_1), 1575 PINMUX_IPSR_MSEL(IP14_10_8, TX5_B, SEL_SCIF5_1),
1577 PINMUX_IPSR_MODSEL_DATA(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2), 1576 PINMUX_IPSR_MSEL(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2),
1578 PINMUX_IPSR_DATA(IP14_13_11, SD2_WP), 1577 PINMUX_IPSR_DATA(IP14_13_11, SD2_WP),
1579 PINMUX_IPSR_DATA(IP14_13_11, MMC_D5), 1578 PINMUX_IPSR_DATA(IP14_13_11, MMC_D5),
1580 PINMUX_IPSR_MODSEL_DATA(IP14_13_11, SDA8_C, SEL_IIC8_2), 1579 PINMUX_IPSR_MSEL(IP14_13_11, SDA8_C, SEL_IIC8_2),
1581 PINMUX_IPSR_MODSEL_DATA(IP14_13_11, RX5_B, SEL_SCIF5_1), 1580 PINMUX_IPSR_MSEL(IP14_13_11, RX5_B, SEL_SCIF5_1),
1582 PINMUX_IPSR_MODSEL_DATA(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2), 1581 PINMUX_IPSR_MSEL(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2),
1583 PINMUX_IPSR_MODSEL_DATA(IP14_16_14, MSIOF0_SCK, SEL_SOF0_0), 1582 PINMUX_IPSR_MSEL(IP14_16_14, MSIOF0_SCK, SEL_SOF0_0),
1584 PINMUX_IPSR_MODSEL_DATA(IP14_16_14, RX2_C, SEL_SCIF2_2), 1583 PINMUX_IPSR_MSEL(IP14_16_14, RX2_C, SEL_SCIF2_2),
1585 PINMUX_IPSR_MODSEL_DATA(IP14_16_14, ADIDATA, SEL_RAD_0), 1584 PINMUX_IPSR_MSEL(IP14_16_14, ADIDATA, SEL_RAD_0),
1586 PINMUX_IPSR_MODSEL_DATA(IP14_16_14, VI1_CLK_C, SEL_VI1_2), 1585 PINMUX_IPSR_MSEL(IP14_16_14, VI1_CLK_C, SEL_VI1_2),
1587 PINMUX_IPSR_DATA(IP14_16_14, VI1_G0_B), 1586 PINMUX_IPSR_DATA(IP14_16_14, VI1_G0_B),
1588 PINMUX_IPSR_MODSEL_DATA(IP14_19_17, MSIOF0_SYNC, SEL_SOF0_0), 1587 PINMUX_IPSR_MSEL(IP14_19_17, MSIOF0_SYNC, SEL_SOF0_0),
1589 PINMUX_IPSR_MODSEL_DATA(IP14_19_17, TX2_C, SEL_SCIF2_2), 1588 PINMUX_IPSR_MSEL(IP14_19_17, TX2_C, SEL_SCIF2_2),
1590 PINMUX_IPSR_MODSEL_DATA(IP14_19_17, ADICS_SAMP, SEL_RAD_0), 1589 PINMUX_IPSR_MSEL(IP14_19_17, ADICS_SAMP, SEL_RAD_0),
1591 PINMUX_IPSR_MODSEL_DATA(IP14_19_17, VI1_CLKENB_C, SEL_VI1_2), 1590 PINMUX_IPSR_MSEL(IP14_19_17, VI1_CLKENB_C, SEL_VI1_2),
1592 PINMUX_IPSR_DATA(IP14_19_17, VI1_G1_B), 1591 PINMUX_IPSR_DATA(IP14_19_17, VI1_G1_B),
1593 PINMUX_IPSR_MODSEL_DATA(IP14_22_20, MSIOF0_TXD, SEL_SOF0_0), 1592 PINMUX_IPSR_MSEL(IP14_22_20, MSIOF0_TXD, SEL_SOF0_0),
1594 PINMUX_IPSR_MODSEL_DATA(IP14_22_20, ADICLK, SEL_RAD_0), 1593 PINMUX_IPSR_MSEL(IP14_22_20, ADICLK, SEL_RAD_0),
1595 PINMUX_IPSR_MODSEL_DATA(IP14_22_20, VI1_FIELD_C, SEL_VI1_2), 1594 PINMUX_IPSR_MSEL(IP14_22_20, VI1_FIELD_C, SEL_VI1_2),
1596 PINMUX_IPSR_DATA(IP14_22_20, VI1_G2_B), 1595 PINMUX_IPSR_DATA(IP14_22_20, VI1_G2_B),
1597 PINMUX_IPSR_MODSEL_DATA(IP14_25_23, MSIOF0_RXD, SEL_SOF0_0), 1596 PINMUX_IPSR_MSEL(IP14_25_23, MSIOF0_RXD, SEL_SOF0_0),
1598 PINMUX_IPSR_MODSEL_DATA(IP14_25_23, ADICHS0, SEL_RAD_0), 1597 PINMUX_IPSR_MSEL(IP14_25_23, ADICHS0, SEL_RAD_0),
1599 PINMUX_IPSR_MODSEL_DATA(IP14_25_23, VI1_DATA0_C, SEL_VI1_2), 1598 PINMUX_IPSR_MSEL(IP14_25_23, VI1_DATA0_C, SEL_VI1_2),
1600 PINMUX_IPSR_DATA(IP14_25_23, VI1_G3_B), 1599 PINMUX_IPSR_DATA(IP14_25_23, VI1_G3_B),
1601 PINMUX_IPSR_MODSEL_DATA(IP14_28_26, MSIOF0_SS1, SEL_SOF0_0), 1600 PINMUX_IPSR_MSEL(IP14_28_26, MSIOF0_SS1, SEL_SOF0_0),
1602 PINMUX_IPSR_MODSEL_DATA(IP14_28_26, MMC_D6, SEL_MMC_0), 1601 PINMUX_IPSR_MSEL(IP14_28_26, MMC_D6, SEL_MMC_0),
1603 PINMUX_IPSR_MODSEL_DATA(IP14_28_26, ADICHS1, SEL_RAD_0), 1602 PINMUX_IPSR_MSEL(IP14_28_26, ADICHS1, SEL_RAD_0),
1604 PINMUX_IPSR_MODSEL_DATA(IP14_28_26, TX0_E, SEL_SCIF0_4), 1603 PINMUX_IPSR_MSEL(IP14_28_26, TX0_E, SEL_SCIF0_4),
1605 PINMUX_IPSR_MODSEL_DATA(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2), 1604 PINMUX_IPSR_MSEL(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2),
1606 PINMUX_IPSR_MODSEL_DATA(IP14_28_26, SCL7_C, SEL_IIC7_2), 1605 PINMUX_IPSR_MSEL(IP14_28_26, SCL7_C, SEL_IIC7_2),
1607 PINMUX_IPSR_DATA(IP14_28_26, VI1_G4_B), 1606 PINMUX_IPSR_DATA(IP14_28_26, VI1_G4_B),
1608 PINMUX_IPSR_MODSEL_DATA(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0), 1607 PINMUX_IPSR_MSEL(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0),
1609 PINMUX_IPSR_MODSEL_DATA(IP14_31_29, MMC_D7, SEL_MMC_0), 1608 PINMUX_IPSR_MSEL(IP14_31_29, MMC_D7, SEL_MMC_0),
1610 PINMUX_IPSR_MODSEL_DATA(IP14_31_29, ADICHS2, SEL_RAD_0), 1609 PINMUX_IPSR_MSEL(IP14_31_29, ADICHS2, SEL_RAD_0),
1611 PINMUX_IPSR_MODSEL_DATA(IP14_31_29, RX0_E, SEL_SCIF0_4), 1610 PINMUX_IPSR_MSEL(IP14_31_29, RX0_E, SEL_SCIF0_4),
1612 PINMUX_IPSR_MODSEL_DATA(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2), 1611 PINMUX_IPSR_MSEL(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2),
1613 PINMUX_IPSR_MODSEL_DATA(IP14_31_29, SDA7_C, SEL_IIC7_2), 1612 PINMUX_IPSR_MSEL(IP14_31_29, SDA7_C, SEL_IIC7_2),
1614 PINMUX_IPSR_DATA(IP14_31_29, VI1_G5_B), 1613 PINMUX_IPSR_DATA(IP14_31_29, VI1_G5_B),
1615 1614
1616 /* IPSR15 */ 1615 /* IPSR15 */
1617 PINMUX_IPSR_MODSEL_DATA(IP15_1_0, SIM0_RST, SEL_SIM_0), 1616 PINMUX_IPSR_MSEL(IP15_1_0, SIM0_RST, SEL_SIM_0),
1618 PINMUX_IPSR_MODSEL_DATA(IP15_1_0, IETX, SEL_IEB_0), 1617 PINMUX_IPSR_MSEL(IP15_1_0, IETX, SEL_IEB_0),
1619 PINMUX_IPSR_MODSEL_DATA(IP15_1_0, CAN1_TX_D, SEL_CAN1_3), 1618 PINMUX_IPSR_MSEL(IP15_1_0, CAN1_TX_D, SEL_CAN1_3),
1620 PINMUX_IPSR_DATA(IP15_3_2, SIM0_CLK), 1619 PINMUX_IPSR_DATA(IP15_3_2, SIM0_CLK),
1621 PINMUX_IPSR_MODSEL_DATA(IP15_3_2, IECLK, SEL_IEB_0), 1620 PINMUX_IPSR_MSEL(IP15_3_2, IECLK, SEL_IEB_0),
1622 PINMUX_IPSR_MODSEL_DATA(IP15_3_2, CAN_CLK_C, SEL_CANCLK_2), 1621 PINMUX_IPSR_MSEL(IP15_3_2, CAN_CLK_C, SEL_CANCLK_2),
1623 PINMUX_IPSR_MODSEL_DATA(IP15_5_4, SIM0_D, SEL_SIM_0), 1622 PINMUX_IPSR_MSEL(IP15_5_4, SIM0_D, SEL_SIM_0),
1624 PINMUX_IPSR_MODSEL_DATA(IP15_5_4, IERX, SEL_IEB_0), 1623 PINMUX_IPSR_MSEL(IP15_5_4, IERX, SEL_IEB_0),
1625 PINMUX_IPSR_MODSEL_DATA(IP15_5_4, CAN1_RX_D, SEL_CAN1_3), 1624 PINMUX_IPSR_MSEL(IP15_5_4, CAN1_RX_D, SEL_CAN1_3),
1626 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, GPS_CLK, SEL_GPS_0), 1625 PINMUX_IPSR_MSEL(IP15_8_6, GPS_CLK, SEL_GPS_0),
1627 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, DU1_DOTCLKIN_C, SEL_DIS_2), 1626 PINMUX_IPSR_MSEL(IP15_8_6, DU1_DOTCLKIN_C, SEL_DIS_2),
1628 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, AUDIO_CLKB_B, SEL_ADG_1), 1627 PINMUX_IPSR_MSEL(IP15_8_6, AUDIO_CLKB_B, SEL_ADG_1),
1629 PINMUX_IPSR_DATA(IP15_8_6, PWM5_B), 1628 PINMUX_IPSR_DATA(IP15_8_6, PWM5_B),
1630 PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SCIFA3_TXD_C, SEL_SCIFA3_2), 1629 PINMUX_IPSR_MSEL(IP15_8_6, SCIFA3_TXD_C, SEL_SCIFA3_2),
1631 PINMUX_IPSR_MODSEL_DATA(IP15_11_9, GPS_SIGN, SEL_GPS_0), 1630 PINMUX_IPSR_MSEL(IP15_11_9, GPS_SIGN, SEL_GPS_0),
1632 PINMUX_IPSR_MODSEL_DATA(IP15_11_9, TX4_C, SEL_SCIF4_2), 1631 PINMUX_IPSR_MSEL(IP15_11_9, TX4_C, SEL_SCIF4_2),
1633 PINMUX_IPSR_MODSEL_DATA(IP15_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2), 1632 PINMUX_IPSR_MSEL(IP15_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
1634 PINMUX_IPSR_DATA(IP15_11_9, PWM5), 1633 PINMUX_IPSR_DATA(IP15_11_9, PWM5),
1635 PINMUX_IPSR_DATA(IP15_11_9, VI1_G6_B), 1634 PINMUX_IPSR_DATA(IP15_11_9, VI1_G6_B),
1636 PINMUX_IPSR_MODSEL_DATA(IP15_11_9, SCIFA3_RXD_C, SEL_SCIFA3_2), 1635 PINMUX_IPSR_MSEL(IP15_11_9, SCIFA3_RXD_C, SEL_SCIFA3_2),
1637 PINMUX_IPSR_MODSEL_DATA(IP15_14_12, GPS_MAG, SEL_GPS_0), 1636 PINMUX_IPSR_MSEL(IP15_14_12, GPS_MAG, SEL_GPS_0),
1638 PINMUX_IPSR_MODSEL_DATA(IP15_14_12, RX4_C, SEL_SCIF4_2), 1637 PINMUX_IPSR_MSEL(IP15_14_12, RX4_C, SEL_SCIF4_2),
1639 PINMUX_IPSR_MODSEL_DATA(IP15_14_12, SCIFA4_RXD_C, SEL_SCIFA4_2), 1638 PINMUX_IPSR_MSEL(IP15_14_12, SCIFA4_RXD_C, SEL_SCIFA4_2),
1640 PINMUX_IPSR_DATA(IP15_14_12, PWM6), 1639 PINMUX_IPSR_DATA(IP15_14_12, PWM6),
1641 PINMUX_IPSR_DATA(IP15_14_12, VI1_G7_B), 1640 PINMUX_IPSR_DATA(IP15_14_12, VI1_G7_B),
1642 PINMUX_IPSR_MODSEL_DATA(IP15_14_12, SCIFA3_SCK_C, SEL_SCIFA3_2), 1641 PINMUX_IPSR_MSEL(IP15_14_12, SCIFA3_SCK_C, SEL_SCIFA3_2),
1643 PINMUX_IPSR_MODSEL_DATA(IP15_17_15, HCTS0_N, SEL_HSCIF0_0), 1642 PINMUX_IPSR_MSEL(IP15_17_15, HCTS0_N, SEL_HSCIF0_0),
1644 PINMUX_IPSR_MODSEL_DATA(IP15_17_15, SCIFB0_CTS_N, SEL_SCIFB_0), 1643 PINMUX_IPSR_MSEL(IP15_17_15, SCIFB0_CTS_N, SEL_SCIFB_0),
1645 PINMUX_IPSR_MODSEL_DATA(IP15_17_15, GLO_I0_C, SEL_GPS_2), 1644 PINMUX_IPSR_MSEL(IP15_17_15, GLO_I0_C, SEL_GPS_2),
1646 PINMUX_IPSR_MODSEL_DATA(IP15_17_15, TCLK1, SEL_TMU1_0), 1645 PINMUX_IPSR_MSEL(IP15_17_15, TCLK1, SEL_TMU1_0),
1647 PINMUX_IPSR_MODSEL_DATA(IP15_17_15, VI1_DATA1_C, SEL_VI1_2), 1646 PINMUX_IPSR_MSEL(IP15_17_15, VI1_DATA1_C, SEL_VI1_2),
1648 PINMUX_IPSR_MODSEL_DATA(IP15_20_18, HRTS0_N, SEL_HSCIF0_0), 1647 PINMUX_IPSR_MSEL(IP15_20_18, HRTS0_N, SEL_HSCIF0_0),
1649 PINMUX_IPSR_MODSEL_DATA(IP15_20_18, SCIFB0_RTS_N, SEL_SCIFB_0), 1648 PINMUX_IPSR_MSEL(IP15_20_18, SCIFB0_RTS_N, SEL_SCIFB_0),
1650 PINMUX_IPSR_MODSEL_DATA(IP15_20_18, GLO_I1_C, SEL_GPS_2), 1649 PINMUX_IPSR_MSEL(IP15_20_18, GLO_I1_C, SEL_GPS_2),
1651 PINMUX_IPSR_MODSEL_DATA(IP15_20_18, VI1_DATA2_C, SEL_VI1_2), 1650 PINMUX_IPSR_MSEL(IP15_20_18, VI1_DATA2_C, SEL_VI1_2),
1652 PINMUX_IPSR_MODSEL_DATA(IP15_23_21, HSCK0, SEL_HSCIF0_0), 1651 PINMUX_IPSR_MSEL(IP15_23_21, HSCK0, SEL_HSCIF0_0),
1653 PINMUX_IPSR_MODSEL_DATA(IP15_23_21, SCIFB0_SCK, SEL_SCIFB_0), 1652 PINMUX_IPSR_MSEL(IP15_23_21, SCIFB0_SCK, SEL_SCIFB_0),
1654 PINMUX_IPSR_MODSEL_DATA(IP15_23_21, GLO_Q0_C, SEL_GPS_2), 1653 PINMUX_IPSR_MSEL(IP15_23_21, GLO_Q0_C, SEL_GPS_2),
1655 PINMUX_IPSR_MODSEL_DATA(IP15_23_21, CAN_CLK, SEL_CANCLK_0), 1654 PINMUX_IPSR_MSEL(IP15_23_21, CAN_CLK, SEL_CANCLK_0),
1656 PINMUX_IPSR_DATA(IP15_23_21, TCLK2), 1655 PINMUX_IPSR_DATA(IP15_23_21, TCLK2),
1657 PINMUX_IPSR_MODSEL_DATA(IP15_23_21, VI1_DATA3_C, SEL_VI1_2), 1656 PINMUX_IPSR_MSEL(IP15_23_21, VI1_DATA3_C, SEL_VI1_2),
1658 PINMUX_IPSR_MODSEL_DATA(IP15_26_24, HRX0, SEL_HSCIF0_0), 1657 PINMUX_IPSR_MSEL(IP15_26_24, HRX0, SEL_HSCIF0_0),
1659 PINMUX_IPSR_MODSEL_DATA(IP15_26_24, SCIFB0_RXD, SEL_SCIFB_0), 1658 PINMUX_IPSR_MSEL(IP15_26_24, SCIFB0_RXD, SEL_SCIFB_0),
1660 PINMUX_IPSR_MODSEL_DATA(IP15_26_24, GLO_Q1_C, SEL_GPS_2), 1659 PINMUX_IPSR_MSEL(IP15_26_24, GLO_Q1_C, SEL_GPS_2),
1661 PINMUX_IPSR_MODSEL_DATA(IP15_26_24, CAN0_RX_B, SEL_CAN0_1), 1660 PINMUX_IPSR_MSEL(IP15_26_24, CAN0_RX_B, SEL_CAN0_1),
1662 PINMUX_IPSR_MODSEL_DATA(IP15_26_24, VI1_DATA4_C, SEL_VI1_2), 1661 PINMUX_IPSR_MSEL(IP15_26_24, VI1_DATA4_C, SEL_VI1_2),
1663 PINMUX_IPSR_MODSEL_DATA(IP15_29_27, HTX0, SEL_HSCIF0_0), 1662 PINMUX_IPSR_MSEL(IP15_29_27, HTX0, SEL_HSCIF0_0),
1664 PINMUX_IPSR_MODSEL_DATA(IP15_29_27, SCIFB0_TXD, SEL_SCIFB_0), 1663 PINMUX_IPSR_MSEL(IP15_29_27, SCIFB0_TXD, SEL_SCIFB_0),
1665 PINMUX_IPSR_MODSEL_DATA(IP15_29_27, GLO_SCLK_C, SEL_GPS_2), 1664 PINMUX_IPSR_MSEL(IP15_29_27, GLO_SCLK_C, SEL_GPS_2),
1666 PINMUX_IPSR_MODSEL_DATA(IP15_29_27, CAN0_TX_B, SEL_CAN0_1), 1665 PINMUX_IPSR_MSEL(IP15_29_27, CAN0_TX_B, SEL_CAN0_1),
1667 PINMUX_IPSR_MODSEL_DATA(IP15_29_27, VI1_DATA5_C, SEL_VI1_2), 1666 PINMUX_IPSR_MSEL(IP15_29_27, VI1_DATA5_C, SEL_VI1_2),
1668 1667
1669 /* IPSR16 */ 1668 /* IPSR16 */
1670 PINMUX_IPSR_MODSEL_DATA(IP16_2_0, HRX1, SEL_HSCIF1_0), 1669 PINMUX_IPSR_MSEL(IP16_2_0, HRX1, SEL_HSCIF1_0),
1671 PINMUX_IPSR_MODSEL_DATA(IP16_2_0, SCIFB1_RXD, SEL_SCIFB1_0), 1670 PINMUX_IPSR_MSEL(IP16_2_0, SCIFB1_RXD, SEL_SCIFB1_0),
1672 PINMUX_IPSR_DATA(IP16_2_0, VI1_R0_B), 1671 PINMUX_IPSR_DATA(IP16_2_0, VI1_R0_B),
1673 PINMUX_IPSR_MODSEL_DATA(IP16_2_0, GLO_SDATA_C, SEL_GPS_2), 1672 PINMUX_IPSR_MSEL(IP16_2_0, GLO_SDATA_C, SEL_GPS_2),
1674 PINMUX_IPSR_MODSEL_DATA(IP16_2_0, VI1_DATA6_C, SEL_VI1_2), 1673 PINMUX_IPSR_MSEL(IP16_2_0, VI1_DATA6_C, SEL_VI1_2),
1675 PINMUX_IPSR_MODSEL_DATA(IP16_5_3, HTX1, SEL_HSCIF1_0), 1674 PINMUX_IPSR_MSEL(IP16_5_3, HTX1, SEL_HSCIF1_0),
1676 PINMUX_IPSR_MODSEL_DATA(IP16_5_3, SCIFB1_TXD, SEL_SCIFB1_0), 1675 PINMUX_IPSR_MSEL(IP16_5_3, SCIFB1_TXD, SEL_SCIFB1_0),
1677 PINMUX_IPSR_DATA(IP16_5_3, VI1_R1_B), 1676 PINMUX_IPSR_DATA(IP16_5_3, VI1_R1_B),
1678 PINMUX_IPSR_MODSEL_DATA(IP16_5_3, GLO_SS_C, SEL_GPS_2), 1677 PINMUX_IPSR_MSEL(IP16_5_3, GLO_SS_C, SEL_GPS_2),
1679 PINMUX_IPSR_MODSEL_DATA(IP16_5_3, VI1_DATA7_C, SEL_VI1_2), 1678 PINMUX_IPSR_MSEL(IP16_5_3, VI1_DATA7_C, SEL_VI1_2),
1680 PINMUX_IPSR_MODSEL_DATA(IP16_7_6, HSCK1, SEL_HSCIF1_0), 1679 PINMUX_IPSR_MSEL(IP16_7_6, HSCK1, SEL_HSCIF1_0),
1681 PINMUX_IPSR_MODSEL_DATA(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0), 1680 PINMUX_IPSR_MSEL(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0),
1682 PINMUX_IPSR_DATA(IP16_7_6, MLB_CLK), 1681 PINMUX_IPSR_DATA(IP16_7_6, MLB_CLK),
1683 PINMUX_IPSR_MODSEL_DATA(IP16_7_6, GLO_RFON_C, SEL_GPS_2), 1682 PINMUX_IPSR_MSEL(IP16_7_6, GLO_RFON_C, SEL_GPS_2),
1684 PINMUX_IPSR_MODSEL_DATA(IP16_9_8, HCTS1_N, SEL_HSCIF1_0), 1683 PINMUX_IPSR_MSEL(IP16_9_8, HCTS1_N, SEL_HSCIF1_0),
1685 PINMUX_IPSR_DATA(IP16_9_8, SCIFB1_CTS_N), 1684 PINMUX_IPSR_DATA(IP16_9_8, SCIFB1_CTS_N),
1686 PINMUX_IPSR_DATA(IP16_9_8, MLB_SIG), 1685 PINMUX_IPSR_DATA(IP16_9_8, MLB_SIG),
1687 PINMUX_IPSR_MODSEL_DATA(IP16_9_8, CAN1_TX_B, SEL_CAN1_1), 1686 PINMUX_IPSR_MSEL(IP16_9_8, CAN1_TX_B, SEL_CAN1_1),
1688 PINMUX_IPSR_MODSEL_DATA(IP16_11_10, HRTS1_N, SEL_HSCIF1_0), 1687 PINMUX_IPSR_MSEL(IP16_11_10, HRTS1_N, SEL_HSCIF1_0),
1689 PINMUX_IPSR_DATA(IP16_11_10, SCIFB1_RTS_N), 1688 PINMUX_IPSR_DATA(IP16_11_10, SCIFB1_RTS_N),
1690 PINMUX_IPSR_DATA(IP16_11_10, MLB_DAT), 1689 PINMUX_IPSR_DATA(IP16_11_10, MLB_DAT),
1691 PINMUX_IPSR_MODSEL_DATA(IP16_11_10, CAN1_RX_B, SEL_CAN1_1), 1690 PINMUX_IPSR_MSEL(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
1692}; 1691};
1693 1692
1694static const struct sh_pfc_pin pinmux_pins[] = { 1693static const struct sh_pfc_pin pinmux_pins[] = {
@@ -3986,24 +3985,6 @@ static const unsigned int usb1_mux[] = {
3986 USB1_PWEN_MARK, 3985 USB1_PWEN_MARK,
3987 USB1_OVC_MARK, 3986 USB1_OVC_MARK,
3988}; 3987};
3989
3990union vin_data {
3991 unsigned int data24[24];
3992 unsigned int data20[20];
3993 unsigned int data16[16];
3994 unsigned int data12[12];
3995 unsigned int data10[10];
3996 unsigned int data8[8];
3997};
3998
3999#define VIN_DATA_PIN_GROUP(n, s) \
4000 { \
4001 .name = #n#s, \
4002 .pins = n##_pins.data##s, \
4003 .mux = n##_mux.data##s, \
4004 .nr_pins = ARRAY_SIZE(n##_pins.data##s), \
4005 }
4006
4007/* - VIN0 ------------------------------------------------------------------- */ 3988/* - VIN0 ------------------------------------------------------------------- */
4008static const union vin_data vin0_data_pins = { 3989static const union vin_data vin0_data_pins = {
4009 .data24 = { 3990 .data24 = {
@@ -6337,8 +6318,8 @@ const struct sh_pfc_soc_info r8a7791_pinmux_info = {
6337 6318
6338 .cfg_regs = pinmux_config_regs, 6319 .cfg_regs = pinmux_config_regs,
6339 6320
6340 .gpio_data = pinmux_data, 6321 .pinmux_data = pinmux_data,
6341 .gpio_data_size = ARRAY_SIZE(pinmux_data), 6322 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
6342}; 6323};
6343#endif 6324#endif
6344 6325
@@ -6358,7 +6339,7 @@ const struct sh_pfc_soc_info r8a7793_pinmux_info = {
6358 6339
6359 .cfg_regs = pinmux_config_regs, 6340 .cfg_regs = pinmux_config_regs,
6360 6341
6361 .gpio_data = pinmux_data, 6342 .pinmux_data = pinmux_data,
6362 .gpio_data_size = ARRAY_SIZE(pinmux_data), 6343 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
6363}; 6344};
6364#endif 6345#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 5248685dbb4e..086f6798b129 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -11,7 +11,6 @@
11 */ 11 */
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/platform_data/gpio-rcar.h>
15 14
16#include "core.h" 15#include "core.h"
17#include "sh_pfc.h" 16#include "sh_pfc.h"
@@ -644,10 +643,10 @@ static const u16 pinmux_data[] = {
644 643
645 /* IPSR0 */ 644 /* IPSR0 */
646 PINMUX_IPSR_DATA(IP0_0, SD1_CD), 645 PINMUX_IPSR_DATA(IP0_0, SD1_CD),
647 PINMUX_IPSR_MODSEL_DATA(IP0_0, CAN0_RX, SEL_CAN0_0), 646 PINMUX_IPSR_MSEL(IP0_0, CAN0_RX, SEL_CAN0_0),
648 PINMUX_IPSR_DATA(IP0_9_8, SD1_WP), 647 PINMUX_IPSR_DATA(IP0_9_8, SD1_WP),
649 PINMUX_IPSR_DATA(IP0_9_8, IRQ7), 648 PINMUX_IPSR_DATA(IP0_9_8, IRQ7),
650 PINMUX_IPSR_MODSEL_DATA(IP0_9_8, CAN0_TX, SEL_CAN0_0), 649 PINMUX_IPSR_MSEL(IP0_9_8, CAN0_TX, SEL_CAN0_0),
651 PINMUX_IPSR_DATA(IP0_10, MMC_CLK), 650 PINMUX_IPSR_DATA(IP0_10, MMC_CLK),
652 PINMUX_IPSR_DATA(IP0_10, SD2_CLK), 651 PINMUX_IPSR_DATA(IP0_10, SD2_CLK),
653 PINMUX_IPSR_DATA(IP0_11, MMC_CMD), 652 PINMUX_IPSR_DATA(IP0_11, MMC_CMD),
@@ -665,68 +664,68 @@ static const u16 pinmux_data[] = {
665 PINMUX_IPSR_DATA(IP0_17, MMC_D5), 664 PINMUX_IPSR_DATA(IP0_17, MMC_D5),
666 PINMUX_IPSR_DATA(IP0_17, SD2_WP), 665 PINMUX_IPSR_DATA(IP0_17, SD2_WP),
667 PINMUX_IPSR_DATA(IP0_19_18, MMC_D6), 666 PINMUX_IPSR_DATA(IP0_19_18, MMC_D6),
668 PINMUX_IPSR_MODSEL_DATA(IP0_19_18, SCIF0_RXD, SEL_SCIF0_0), 667 PINMUX_IPSR_MSEL(IP0_19_18, SCIF0_RXD, SEL_SCIF0_0),
669 PINMUX_IPSR_MODSEL_DATA(IP0_19_18, I2C2_SCL_B, SEL_I2C02_1), 668 PINMUX_IPSR_MSEL(IP0_19_18, I2C2_SCL_B, SEL_I2C02_1),
670 PINMUX_IPSR_MODSEL_DATA(IP0_19_18, CAN1_RX, SEL_CAN1_0), 669 PINMUX_IPSR_MSEL(IP0_19_18, CAN1_RX, SEL_CAN1_0),
671 PINMUX_IPSR_DATA(IP0_21_20, MMC_D7), 670 PINMUX_IPSR_DATA(IP0_21_20, MMC_D7),
672 PINMUX_IPSR_MODSEL_DATA(IP0_21_20, SCIF0_TXD, SEL_SCIF0_0), 671 PINMUX_IPSR_MSEL(IP0_21_20, SCIF0_TXD, SEL_SCIF0_0),
673 PINMUX_IPSR_MODSEL_DATA(IP0_21_20, I2C2_SDA_B, SEL_I2C02_1), 672 PINMUX_IPSR_MSEL(IP0_21_20, I2C2_SDA_B, SEL_I2C02_1),
674 PINMUX_IPSR_MODSEL_DATA(IP0_21_20, CAN1_TX, SEL_CAN1_0), 673 PINMUX_IPSR_MSEL(IP0_21_20, CAN1_TX, SEL_CAN1_0),
675 PINMUX_IPSR_DATA(IP0_23_22, D0), 674 PINMUX_IPSR_DATA(IP0_23_22, D0),
676 PINMUX_IPSR_MODSEL_DATA(IP0_23_22, SCIFA3_SCK_B, SEL_SCIFA3_1), 675 PINMUX_IPSR_MSEL(IP0_23_22, SCIFA3_SCK_B, SEL_SCIFA3_1),
677 PINMUX_IPSR_DATA(IP0_23_22, IRQ4), 676 PINMUX_IPSR_DATA(IP0_23_22, IRQ4),
678 PINMUX_IPSR_DATA(IP0_24, D1), 677 PINMUX_IPSR_DATA(IP0_24, D1),
679 PINMUX_IPSR_MODSEL_DATA(IP0_24, SCIFA3_RXD_B, SEL_SCIFA3_1), 678 PINMUX_IPSR_MSEL(IP0_24, SCIFA3_RXD_B, SEL_SCIFA3_1),
680 PINMUX_IPSR_DATA(IP0_25, D2), 679 PINMUX_IPSR_DATA(IP0_25, D2),
681 PINMUX_IPSR_MODSEL_DATA(IP0_25, SCIFA3_TXD_B, SEL_SCIFA3_1), 680 PINMUX_IPSR_MSEL(IP0_25, SCIFA3_TXD_B, SEL_SCIFA3_1),
682 PINMUX_IPSR_DATA(IP0_27_26, D3), 681 PINMUX_IPSR_DATA(IP0_27_26, D3),
683 PINMUX_IPSR_MODSEL_DATA(IP0_27_26, I2C3_SCL_B, SEL_I2C03_1), 682 PINMUX_IPSR_MSEL(IP0_27_26, I2C3_SCL_B, SEL_I2C03_1),
684 PINMUX_IPSR_MODSEL_DATA(IP0_27_26, SCIF5_RXD_B, SEL_SCIF5_1), 683 PINMUX_IPSR_MSEL(IP0_27_26, SCIF5_RXD_B, SEL_SCIF5_1),
685 PINMUX_IPSR_DATA(IP0_29_28, D4), 684 PINMUX_IPSR_DATA(IP0_29_28, D4),
686 PINMUX_IPSR_MODSEL_DATA(IP0_29_28, I2C3_SDA_B, SEL_I2C03_1), 685 PINMUX_IPSR_MSEL(IP0_29_28, I2C3_SDA_B, SEL_I2C03_1),
687 PINMUX_IPSR_MODSEL_DATA(IP0_29_28, SCIF5_TXD_B, SEL_SCIF5_1), 686 PINMUX_IPSR_MSEL(IP0_29_28, SCIF5_TXD_B, SEL_SCIF5_1),
688 PINMUX_IPSR_DATA(IP0_31_30, D5), 687 PINMUX_IPSR_DATA(IP0_31_30, D5),
689 PINMUX_IPSR_MODSEL_DATA(IP0_31_30, SCIF4_RXD_B, SEL_SCIF4_1), 688 PINMUX_IPSR_MSEL(IP0_31_30, SCIF4_RXD_B, SEL_SCIF4_1),
690 PINMUX_IPSR_MODSEL_DATA(IP0_31_30, I2C0_SCL_D, SEL_I2C00_3), 689 PINMUX_IPSR_MSEL(IP0_31_30, I2C0_SCL_D, SEL_I2C00_3),
691 690
692 /* IPSR1 */ 691 /* IPSR1 */
693 PINMUX_IPSR_DATA(IP1_1_0, D6), 692 PINMUX_IPSR_DATA(IP1_1_0, D6),
694 PINMUX_IPSR_MODSEL_DATA(IP1_1_0, SCIF4_TXD_B, SEL_SCIF4_1), 693 PINMUX_IPSR_MSEL(IP1_1_0, SCIF4_TXD_B, SEL_SCIF4_1),
695 PINMUX_IPSR_MODSEL_DATA(IP1_1_0, I2C0_SDA_D, SEL_I2C00_3), 694 PINMUX_IPSR_MSEL(IP1_1_0, I2C0_SDA_D, SEL_I2C00_3),
696 PINMUX_IPSR_DATA(IP1_3_2, D7), 695 PINMUX_IPSR_DATA(IP1_3_2, D7),
697 PINMUX_IPSR_DATA(IP1_3_2, IRQ3), 696 PINMUX_IPSR_DATA(IP1_3_2, IRQ3),
698 PINMUX_IPSR_MODSEL_DATA(IP1_3_2, TCLK1, SEL_TMU_0), 697 PINMUX_IPSR_MSEL(IP1_3_2, TCLK1, SEL_TMU_0),
699 PINMUX_IPSR_DATA(IP1_3_2, PWM6_B), 698 PINMUX_IPSR_DATA(IP1_3_2, PWM6_B),
700 PINMUX_IPSR_DATA(IP1_5_4, D8), 699 PINMUX_IPSR_DATA(IP1_5_4, D8),
701 PINMUX_IPSR_DATA(IP1_5_4, HSCIF2_HRX), 700 PINMUX_IPSR_DATA(IP1_5_4, HSCIF2_HRX),
702 PINMUX_IPSR_MODSEL_DATA(IP1_5_4, I2C1_SCL_B, SEL_I2C01_1), 701 PINMUX_IPSR_MSEL(IP1_5_4, I2C1_SCL_B, SEL_I2C01_1),
703 PINMUX_IPSR_DATA(IP1_7_6, D9), 702 PINMUX_IPSR_DATA(IP1_7_6, D9),
704 PINMUX_IPSR_DATA(IP1_7_6, HSCIF2_HTX), 703 PINMUX_IPSR_DATA(IP1_7_6, HSCIF2_HTX),
705 PINMUX_IPSR_MODSEL_DATA(IP1_7_6, I2C1_SDA_B, SEL_I2C01_1), 704 PINMUX_IPSR_MSEL(IP1_7_6, I2C1_SDA_B, SEL_I2C01_1),
706 PINMUX_IPSR_DATA(IP1_10_8, D10), 705 PINMUX_IPSR_DATA(IP1_10_8, D10),
707 PINMUX_IPSR_DATA(IP1_10_8, HSCIF2_HSCK), 706 PINMUX_IPSR_DATA(IP1_10_8, HSCIF2_HSCK),
708 PINMUX_IPSR_MODSEL_DATA(IP1_10_8, SCIF1_SCK_C, SEL_SCIF1_2), 707 PINMUX_IPSR_MSEL(IP1_10_8, SCIF1_SCK_C, SEL_SCIF1_2),
709 PINMUX_IPSR_DATA(IP1_10_8, IRQ6), 708 PINMUX_IPSR_DATA(IP1_10_8, IRQ6),
710 PINMUX_IPSR_DATA(IP1_10_8, PWM5_C), 709 PINMUX_IPSR_DATA(IP1_10_8, PWM5_C),
711 PINMUX_IPSR_DATA(IP1_12_11, D11), 710 PINMUX_IPSR_DATA(IP1_12_11, D11),
712 PINMUX_IPSR_DATA(IP1_12_11, HSCIF2_HCTS_N), 711 PINMUX_IPSR_DATA(IP1_12_11, HSCIF2_HCTS_N),
713 PINMUX_IPSR_MODSEL_DATA(IP1_12_11, SCIF1_RXD_C, SEL_SCIF1_2), 712 PINMUX_IPSR_MSEL(IP1_12_11, SCIF1_RXD_C, SEL_SCIF1_2),
714 PINMUX_IPSR_MODSEL_DATA(IP1_12_11, I2C1_SCL_D, SEL_I2C01_3), 713 PINMUX_IPSR_MSEL(IP1_12_11, I2C1_SCL_D, SEL_I2C01_3),
715 PINMUX_IPSR_DATA(IP1_14_13, D12), 714 PINMUX_IPSR_DATA(IP1_14_13, D12),
716 PINMUX_IPSR_DATA(IP1_14_13, HSCIF2_HRTS_N), 715 PINMUX_IPSR_DATA(IP1_14_13, HSCIF2_HRTS_N),
717 PINMUX_IPSR_MODSEL_DATA(IP1_14_13, SCIF1_TXD_C, SEL_SCIF1_2), 716 PINMUX_IPSR_MSEL(IP1_14_13, SCIF1_TXD_C, SEL_SCIF1_2),
718 PINMUX_IPSR_MODSEL_DATA(IP1_14_13, I2C1_SDA_D, SEL_I2C01_3), 717 PINMUX_IPSR_MSEL(IP1_14_13, I2C1_SDA_D, SEL_I2C01_3),
719 PINMUX_IPSR_DATA(IP1_17_15, D13), 718 PINMUX_IPSR_DATA(IP1_17_15, D13),
720 PINMUX_IPSR_MODSEL_DATA(IP1_17_15, SCIFA1_SCK, SEL_SCIFA1_0), 719 PINMUX_IPSR_MSEL(IP1_17_15, SCIFA1_SCK, SEL_SCIFA1_0),
721 PINMUX_IPSR_DATA(IP1_17_15, TANS1), 720 PINMUX_IPSR_DATA(IP1_17_15, TANS1),
722 PINMUX_IPSR_DATA(IP1_17_15, PWM2_C), 721 PINMUX_IPSR_DATA(IP1_17_15, PWM2_C),
723 PINMUX_IPSR_MODSEL_DATA(IP1_17_15, TCLK2_B, SEL_TMU_1), 722 PINMUX_IPSR_MSEL(IP1_17_15, TCLK2_B, SEL_TMU_1),
724 PINMUX_IPSR_DATA(IP1_19_18, D14), 723 PINMUX_IPSR_DATA(IP1_19_18, D14),
725 PINMUX_IPSR_MODSEL_DATA(IP1_19_18, SCIFA1_RXD, SEL_SCIFA1_0), 724 PINMUX_IPSR_MSEL(IP1_19_18, SCIFA1_RXD, SEL_SCIFA1_0),
726 PINMUX_IPSR_MODSEL_DATA(IP1_19_18, IIC0_SCL_B, SEL_IIC00_1), 725 PINMUX_IPSR_MSEL(IP1_19_18, IIC0_SCL_B, SEL_IIC00_1),
727 PINMUX_IPSR_DATA(IP1_21_20, D15), 726 PINMUX_IPSR_DATA(IP1_21_20, D15),
728 PINMUX_IPSR_MODSEL_DATA(IP1_21_20, SCIFA1_TXD, SEL_SCIFA1_0), 727 PINMUX_IPSR_MSEL(IP1_21_20, SCIFA1_TXD, SEL_SCIFA1_0),
729 PINMUX_IPSR_MODSEL_DATA(IP1_21_20, IIC0_SDA_B, SEL_IIC00_1), 728 PINMUX_IPSR_MSEL(IP1_21_20, IIC0_SDA_B, SEL_IIC00_1),
730 PINMUX_IPSR_DATA(IP1_23_22, A0), 729 PINMUX_IPSR_DATA(IP1_23_22, A0),
731 PINMUX_IPSR_DATA(IP1_23_22, SCIFB1_SCK), 730 PINMUX_IPSR_DATA(IP1_23_22, SCIFB1_SCK),
732 PINMUX_IPSR_DATA(IP1_23_22, PWM3_B), 731 PINMUX_IPSR_DATA(IP1_23_22, PWM3_B),
@@ -742,58 +741,58 @@ static const u16 pinmux_data[] = {
742 PINMUX_IPSR_DATA(IP1_29_28, TPUTO3_C), 741 PINMUX_IPSR_DATA(IP1_29_28, TPUTO3_C),
743 PINMUX_IPSR_DATA(IP1_31_30, A6), 742 PINMUX_IPSR_DATA(IP1_31_30, A6),
744 PINMUX_IPSR_DATA(IP1_31_30, SCIFB0_CTS_N), 743 PINMUX_IPSR_DATA(IP1_31_30, SCIFB0_CTS_N),
745 PINMUX_IPSR_MODSEL_DATA(IP1_31_30, SCIFA4_RXD_B, SEL_SCIFA4_1), 744 PINMUX_IPSR_MSEL(IP1_31_30, SCIFA4_RXD_B, SEL_SCIFA4_1),
746 PINMUX_IPSR_DATA(IP1_31_30, TPUTO2_C), 745 PINMUX_IPSR_DATA(IP1_31_30, TPUTO2_C),
747 746
748 /* IPSR2 */ 747 /* IPSR2 */
749 PINMUX_IPSR_DATA(IP2_1_0, A7), 748 PINMUX_IPSR_DATA(IP2_1_0, A7),
750 PINMUX_IPSR_DATA(IP2_1_0, SCIFB0_RTS_N), 749 PINMUX_IPSR_DATA(IP2_1_0, SCIFB0_RTS_N),
751 PINMUX_IPSR_MODSEL_DATA(IP2_1_0, SCIFA4_TXD_B, SEL_SCIFA4_1), 750 PINMUX_IPSR_MSEL(IP2_1_0, SCIFA4_TXD_B, SEL_SCIFA4_1),
752 PINMUX_IPSR_DATA(IP2_3_2, A8), 751 PINMUX_IPSR_DATA(IP2_3_2, A8),
753 PINMUX_IPSR_MODSEL_DATA(IP2_3_2, MSIOF1_RXD, SEL_MSI1_0), 752 PINMUX_IPSR_MSEL(IP2_3_2, MSIOF1_RXD, SEL_MSI1_0),
754 PINMUX_IPSR_MODSEL_DATA(IP2_3_2, SCIFA0_RXD_B, SEL_SCIFA0_1), 753 PINMUX_IPSR_MSEL(IP2_3_2, SCIFA0_RXD_B, SEL_SCIFA0_1),
755 PINMUX_IPSR_DATA(IP2_5_4, A9), 754 PINMUX_IPSR_DATA(IP2_5_4, A9),
756 PINMUX_IPSR_MODSEL_DATA(IP2_5_4, MSIOF1_TXD, SEL_MSI1_0), 755 PINMUX_IPSR_MSEL(IP2_5_4, MSIOF1_TXD, SEL_MSI1_0),
757 PINMUX_IPSR_MODSEL_DATA(IP2_5_4, SCIFA0_TXD_B, SEL_SCIFA0_1), 756 PINMUX_IPSR_MSEL(IP2_5_4, SCIFA0_TXD_B, SEL_SCIFA0_1),
758 PINMUX_IPSR_DATA(IP2_7_6, A10), 757 PINMUX_IPSR_DATA(IP2_7_6, A10),
759 PINMUX_IPSR_MODSEL_DATA(IP2_7_6, MSIOF1_SCK, SEL_MSI1_0), 758 PINMUX_IPSR_MSEL(IP2_7_6, MSIOF1_SCK, SEL_MSI1_0),
760 PINMUX_IPSR_MODSEL_DATA(IP2_7_6, IIC1_SCL_B, SEL_IIC01_1), 759 PINMUX_IPSR_MSEL(IP2_7_6, IIC1_SCL_B, SEL_IIC01_1),
761 PINMUX_IPSR_DATA(IP2_9_8, A11), 760 PINMUX_IPSR_DATA(IP2_9_8, A11),
762 PINMUX_IPSR_MODSEL_DATA(IP2_9_8, MSIOF1_SYNC, SEL_MSI1_0), 761 PINMUX_IPSR_MSEL(IP2_9_8, MSIOF1_SYNC, SEL_MSI1_0),
763 PINMUX_IPSR_MODSEL_DATA(IP2_9_8, IIC1_SDA_B, SEL_IIC01_1), 762 PINMUX_IPSR_MSEL(IP2_9_8, IIC1_SDA_B, SEL_IIC01_1),
764 PINMUX_IPSR_DATA(IP2_11_10, A12), 763 PINMUX_IPSR_DATA(IP2_11_10, A12),
765 PINMUX_IPSR_MODSEL_DATA(IP2_11_10, MSIOF1_SS1, SEL_MSI1_0), 764 PINMUX_IPSR_MSEL(IP2_11_10, MSIOF1_SS1, SEL_MSI1_0),
766 PINMUX_IPSR_MODSEL_DATA(IP2_11_10, SCIFA5_RXD_B, SEL_SCIFA5_1), 765 PINMUX_IPSR_MSEL(IP2_11_10, SCIFA5_RXD_B, SEL_SCIFA5_1),
767 PINMUX_IPSR_DATA(IP2_13_12, A13), 766 PINMUX_IPSR_DATA(IP2_13_12, A13),
768 PINMUX_IPSR_MODSEL_DATA(IP2_13_12, MSIOF1_SS2, SEL_MSI1_0), 767 PINMUX_IPSR_MSEL(IP2_13_12, MSIOF1_SS2, SEL_MSI1_0),
769 PINMUX_IPSR_MODSEL_DATA(IP2_13_12, SCIFA5_TXD_B, SEL_SCIFA5_1), 768 PINMUX_IPSR_MSEL(IP2_13_12, SCIFA5_TXD_B, SEL_SCIFA5_1),
770 PINMUX_IPSR_DATA(IP2_15_14, A14), 769 PINMUX_IPSR_DATA(IP2_15_14, A14),
771 PINMUX_IPSR_MODSEL_DATA(IP2_15_14, MSIOF2_RXD, SEL_MSI2_0), 770 PINMUX_IPSR_MSEL(IP2_15_14, MSIOF2_RXD, SEL_MSI2_0),
772 PINMUX_IPSR_MODSEL_DATA(IP2_15_14, HSCIF0_HRX_B, SEL_HSCIF0_1), 771 PINMUX_IPSR_MSEL(IP2_15_14, HSCIF0_HRX_B, SEL_HSCIF0_1),
773 PINMUX_IPSR_MODSEL_DATA(IP2_15_14, DREQ1_N, SEL_LBS_0), 772 PINMUX_IPSR_MSEL(IP2_15_14, DREQ1_N, SEL_LBS_0),
774 PINMUX_IPSR_DATA(IP2_17_16, A15), 773 PINMUX_IPSR_DATA(IP2_17_16, A15),
775 PINMUX_IPSR_MODSEL_DATA(IP2_17_16, MSIOF2_TXD, SEL_MSI2_0), 774 PINMUX_IPSR_MSEL(IP2_17_16, MSIOF2_TXD, SEL_MSI2_0),
776 PINMUX_IPSR_MODSEL_DATA(IP2_17_16, HSCIF0_HTX_B, SEL_HSCIF0_1), 775 PINMUX_IPSR_MSEL(IP2_17_16, HSCIF0_HTX_B, SEL_HSCIF0_1),
777 PINMUX_IPSR_MODSEL_DATA(IP2_17_16, DACK1, SEL_LBS_0), 776 PINMUX_IPSR_MSEL(IP2_17_16, DACK1, SEL_LBS_0),
778 PINMUX_IPSR_DATA(IP2_20_18, A16), 777 PINMUX_IPSR_DATA(IP2_20_18, A16),
779 PINMUX_IPSR_MODSEL_DATA(IP2_20_18, MSIOF2_SCK, SEL_MSI2_0), 778 PINMUX_IPSR_MSEL(IP2_20_18, MSIOF2_SCK, SEL_MSI2_0),
780 PINMUX_IPSR_MODSEL_DATA(IP2_20_18, HSCIF0_HSCK_B, SEL_HSCIF0_1), 779 PINMUX_IPSR_MSEL(IP2_20_18, HSCIF0_HSCK_B, SEL_HSCIF0_1),
781 PINMUX_IPSR_MODSEL_DATA(IP2_20_18, SPEEDIN, SEL_RSP_0), 780 PINMUX_IPSR_MSEL(IP2_20_18, SPEEDIN, SEL_RSP_0),
782 PINMUX_IPSR_MODSEL_DATA(IP2_20_18, VSP, SEL_SPDM_0), 781 PINMUX_IPSR_MSEL(IP2_20_18, VSP, SEL_SPDM_0),
783 PINMUX_IPSR_MODSEL_DATA(IP2_20_18, CAN_CLK_C, SEL_CAN_2), 782 PINMUX_IPSR_MSEL(IP2_20_18, CAN_CLK_C, SEL_CAN_2),
784 PINMUX_IPSR_DATA(IP2_20_18, TPUTO2_B), 783 PINMUX_IPSR_DATA(IP2_20_18, TPUTO2_B),
785 PINMUX_IPSR_DATA(IP2_23_21, A17), 784 PINMUX_IPSR_DATA(IP2_23_21, A17),
786 PINMUX_IPSR_MODSEL_DATA(IP2_23_21, MSIOF2_SYNC, SEL_MSI2_0), 785 PINMUX_IPSR_MSEL(IP2_23_21, MSIOF2_SYNC, SEL_MSI2_0),
787 PINMUX_IPSR_MODSEL_DATA(IP2_23_21, SCIF4_RXD_E, SEL_SCIF4_4), 786 PINMUX_IPSR_MSEL(IP2_23_21, SCIF4_RXD_E, SEL_SCIF4_4),
788 PINMUX_IPSR_MODSEL_DATA(IP2_23_21, CAN1_RX_B, SEL_CAN1_1), 787 PINMUX_IPSR_MSEL(IP2_23_21, CAN1_RX_B, SEL_CAN1_1),
789 PINMUX_IPSR_MODSEL_DATA(IP2_23_21, AVB_AVTP_CAPTURE_B, SEL_AVB_1), 788 PINMUX_IPSR_MSEL(IP2_23_21, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
790 PINMUX_IPSR_DATA(IP2_26_24, A18), 789 PINMUX_IPSR_DATA(IP2_26_24, A18),
791 PINMUX_IPSR_MODSEL_DATA(IP2_26_24, MSIOF2_SS1, SEL_MSI2_0), 790 PINMUX_IPSR_MSEL(IP2_26_24, MSIOF2_SS1, SEL_MSI2_0),
792 PINMUX_IPSR_MODSEL_DATA(IP2_26_24, SCIF4_TXD_E, SEL_SCIF4_4), 791 PINMUX_IPSR_MSEL(IP2_26_24, SCIF4_TXD_E, SEL_SCIF4_4),
793 PINMUX_IPSR_MODSEL_DATA(IP2_26_24, CAN1_TX_B, SEL_CAN1_1), 792 PINMUX_IPSR_MSEL(IP2_26_24, CAN1_TX_B, SEL_CAN1_1),
794 PINMUX_IPSR_MODSEL_DATA(IP2_26_24, AVB_AVTP_MATCH_B, SEL_AVB_1), 793 PINMUX_IPSR_MSEL(IP2_26_24, AVB_AVTP_MATCH_B, SEL_AVB_1),
795 PINMUX_IPSR_DATA(IP2_29_27, A19), 794 PINMUX_IPSR_DATA(IP2_29_27, A19),
796 PINMUX_IPSR_MODSEL_DATA(IP2_29_27, MSIOF2_SS2, SEL_MSI2_0), 795 PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_SS2, SEL_MSI2_0),
797 PINMUX_IPSR_DATA(IP2_29_27, PWM4), 796 PINMUX_IPSR_DATA(IP2_29_27, PWM4),
798 PINMUX_IPSR_DATA(IP2_29_27, TPUTO2), 797 PINMUX_IPSR_DATA(IP2_29_27, TPUTO2),
799 PINMUX_IPSR_DATA(IP2_29_27, MOUT0), 798 PINMUX_IPSR_DATA(IP2_29_27, MOUT0),
@@ -831,42 +830,42 @@ static const u16 pinmux_data[] = {
831 PINMUX_IPSR_DATA(IP3_14_13, VI1_DATA11), 830 PINMUX_IPSR_DATA(IP3_14_13, VI1_DATA11),
832 PINMUX_IPSR_DATA(IP3_17_15, EX_CS2_N), 831 PINMUX_IPSR_DATA(IP3_17_15, EX_CS2_N),
833 PINMUX_IPSR_DATA(IP3_17_15, PWM0), 832 PINMUX_IPSR_DATA(IP3_17_15, PWM0),
834 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SCIF4_RXD_C, SEL_SCIF4_2), 833 PINMUX_IPSR_MSEL(IP3_17_15, SCIF4_RXD_C, SEL_SCIF4_2),
835 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, TS_SDATA_B, SEL_TSIF0_1), 834 PINMUX_IPSR_MSEL(IP3_17_15, TS_SDATA_B, SEL_TSIF0_1),
836 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, RIF0_SYNC, SEL_DR0_0), 835 PINMUX_IPSR_MSEL(IP3_17_15, RIF0_SYNC, SEL_DR0_0),
837 PINMUX_IPSR_DATA(IP3_17_15, TPUTO3), 836 PINMUX_IPSR_DATA(IP3_17_15, TPUTO3),
838 PINMUX_IPSR_DATA(IP3_17_15, SCIFB2_TXD), 837 PINMUX_IPSR_DATA(IP3_17_15, SCIFB2_TXD),
839 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SDATA_B, SEL_FSN_1), 838 PINMUX_IPSR_MSEL(IP3_17_15, SDATA_B, SEL_FSN_1),
840 PINMUX_IPSR_DATA(IP3_20_18, EX_CS3_N), 839 PINMUX_IPSR_DATA(IP3_20_18, EX_CS3_N),
841 PINMUX_IPSR_MODSEL_DATA(IP3_20_18, SCIFA2_SCK, SEL_SCIFA2_0), 840 PINMUX_IPSR_MSEL(IP3_20_18, SCIFA2_SCK, SEL_SCIFA2_0),
842 PINMUX_IPSR_MODSEL_DATA(IP3_20_18, SCIF4_TXD_C, SEL_SCIF4_2), 841 PINMUX_IPSR_MSEL(IP3_20_18, SCIF4_TXD_C, SEL_SCIF4_2),
843 PINMUX_IPSR_MODSEL_DATA(IP3_20_18, TS_SCK_B, SEL_TSIF0_1), 842 PINMUX_IPSR_MSEL(IP3_20_18, TS_SCK_B, SEL_TSIF0_1),
844 PINMUX_IPSR_MODSEL_DATA(IP3_20_18, RIF0_CLK, SEL_DR0_0), 843 PINMUX_IPSR_MSEL(IP3_20_18, RIF0_CLK, SEL_DR0_0),
845 PINMUX_IPSR_MODSEL_DATA(IP3_20_18, BPFCLK, SEL_DARC_0), 844 PINMUX_IPSR_MSEL(IP3_20_18, BPFCLK, SEL_DARC_0),
846 PINMUX_IPSR_DATA(IP3_20_18, SCIFB2_SCK), 845 PINMUX_IPSR_DATA(IP3_20_18, SCIFB2_SCK),
847 PINMUX_IPSR_MODSEL_DATA(IP3_20_18, MDATA_B, SEL_FSN_1), 846 PINMUX_IPSR_MSEL(IP3_20_18, MDATA_B, SEL_FSN_1),
848 PINMUX_IPSR_DATA(IP3_23_21, EX_CS4_N), 847 PINMUX_IPSR_DATA(IP3_23_21, EX_CS4_N),
849 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SCIFA2_RXD, SEL_SCIFA2_0), 848 PINMUX_IPSR_MSEL(IP3_23_21, SCIFA2_RXD, SEL_SCIFA2_0),
850 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, I2C2_SCL_E, SEL_I2C02_4), 849 PINMUX_IPSR_MSEL(IP3_23_21, I2C2_SCL_E, SEL_I2C02_4),
851 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, TS_SDEN_B, SEL_TSIF0_1), 850 PINMUX_IPSR_MSEL(IP3_23_21, TS_SDEN_B, SEL_TSIF0_1),
852 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, RIF0_D0, SEL_DR0_0), 851 PINMUX_IPSR_MSEL(IP3_23_21, RIF0_D0, SEL_DR0_0),
853 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, FMCLK, SEL_DARC_0), 852 PINMUX_IPSR_MSEL(IP3_23_21, FMCLK, SEL_DARC_0),
854 PINMUX_IPSR_DATA(IP3_23_21, SCIFB2_CTS_N), 853 PINMUX_IPSR_DATA(IP3_23_21, SCIFB2_CTS_N),
855 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SCKZ_B, SEL_FSN_1), 854 PINMUX_IPSR_MSEL(IP3_23_21, SCKZ_B, SEL_FSN_1),
856 PINMUX_IPSR_DATA(IP3_26_24, EX_CS5_N), 855 PINMUX_IPSR_DATA(IP3_26_24, EX_CS5_N),
857 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SCIFA2_TXD, SEL_SCIFA2_0), 856 PINMUX_IPSR_MSEL(IP3_26_24, SCIFA2_TXD, SEL_SCIFA2_0),
858 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, I2C2_SDA_E, SEL_I2C02_4), 857 PINMUX_IPSR_MSEL(IP3_26_24, I2C2_SDA_E, SEL_I2C02_4),
859 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, TS_SPSYNC_B, SEL_TSIF0_1), 858 PINMUX_IPSR_MSEL(IP3_26_24, TS_SPSYNC_B, SEL_TSIF0_1),
860 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, RIF0_D1, SEL_DR1_0), 859 PINMUX_IPSR_MSEL(IP3_26_24, RIF0_D1, SEL_DR1_0),
861 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, FMIN, SEL_DARC_0), 860 PINMUX_IPSR_MSEL(IP3_26_24, FMIN, SEL_DARC_0),
862 PINMUX_IPSR_DATA(IP3_26_24, SCIFB2_RTS_N), 861 PINMUX_IPSR_DATA(IP3_26_24, SCIFB2_RTS_N),
863 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, STM_N_B, SEL_FSN_1), 862 PINMUX_IPSR_MSEL(IP3_26_24, STM_N_B, SEL_FSN_1),
864 PINMUX_IPSR_DATA(IP3_29_27, BS_N), 863 PINMUX_IPSR_DATA(IP3_29_27, BS_N),
865 PINMUX_IPSR_DATA(IP3_29_27, DRACK0), 864 PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
866 PINMUX_IPSR_DATA(IP3_29_27, PWM1_C), 865 PINMUX_IPSR_DATA(IP3_29_27, PWM1_C),
867 PINMUX_IPSR_DATA(IP3_29_27, TPUTO0_C), 866 PINMUX_IPSR_DATA(IP3_29_27, TPUTO0_C),
868 PINMUX_IPSR_DATA(IP3_29_27, ATACS01_N), 867 PINMUX_IPSR_DATA(IP3_29_27, ATACS01_N),
869 PINMUX_IPSR_MODSEL_DATA(IP3_29_27, MTS_N_B, SEL_FSN_1), 868 PINMUX_IPSR_MSEL(IP3_29_27, MTS_N_B, SEL_FSN_1),
870 PINMUX_IPSR_DATA(IP3_30, RD_N), 869 PINMUX_IPSR_DATA(IP3_30, RD_N),
871 PINMUX_IPSR_DATA(IP3_30, ATACS11_N), 870 PINMUX_IPSR_DATA(IP3_30, ATACS11_N),
872 PINMUX_IPSR_DATA(IP3_31, RD_WR_N), 871 PINMUX_IPSR_DATA(IP3_31, RD_WR_N),
@@ -874,18 +873,18 @@ static const u16 pinmux_data[] = {
874 873
875 /* IPSR4 */ 874 /* IPSR4 */
876 PINMUX_IPSR_DATA(IP4_1_0, EX_WAIT0), 875 PINMUX_IPSR_DATA(IP4_1_0, EX_WAIT0),
877 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, CAN_CLK_B, SEL_CAN_1), 876 PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_B, SEL_CAN_1),
878 PINMUX_IPSR_MODSEL_DATA(IP4_1_0, SCIF_CLK, SEL_SCIF0_0), 877 PINMUX_IPSR_MSEL(IP4_1_0, SCIF_CLK, SEL_SCIF0_0),
879 PINMUX_IPSR_DATA(IP4_1_0, PWMFSW0), 878 PINMUX_IPSR_DATA(IP4_1_0, PWMFSW0),
880 PINMUX_IPSR_DATA(IP4_4_2, DU0_DR0), 879 PINMUX_IPSR_DATA(IP4_4_2, DU0_DR0),
881 PINMUX_IPSR_DATA(IP4_4_2, LCDOUT16), 880 PINMUX_IPSR_DATA(IP4_4_2, LCDOUT16),
882 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, SCIF5_RXD_C, SEL_SCIF5_2), 881 PINMUX_IPSR_MSEL(IP4_4_2, SCIF5_RXD_C, SEL_SCIF5_2),
883 PINMUX_IPSR_MODSEL_DATA(IP4_4_2, I2C2_SCL_D, SEL_I2C02_3), 882 PINMUX_IPSR_MSEL(IP4_4_2, I2C2_SCL_D, SEL_I2C02_3),
884 PINMUX_IPSR_DATA(IP4_4_2, CC50_STATE0), 883 PINMUX_IPSR_DATA(IP4_4_2, CC50_STATE0),
885 PINMUX_IPSR_DATA(IP4_7_5, DU0_DR1), 884 PINMUX_IPSR_DATA(IP4_7_5, DU0_DR1),
886 PINMUX_IPSR_DATA(IP4_7_5, LCDOUT17), 885 PINMUX_IPSR_DATA(IP4_7_5, LCDOUT17),
887 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, SCIF5_TXD_C, SEL_SCIF5_2), 886 PINMUX_IPSR_MSEL(IP4_7_5, SCIF5_TXD_C, SEL_SCIF5_2),
888 PINMUX_IPSR_MODSEL_DATA(IP4_7_5, I2C2_SDA_D, SEL_I2C02_3), 887 PINMUX_IPSR_MSEL(IP4_7_5, I2C2_SDA_D, SEL_I2C02_3),
889 PINMUX_IPSR_DATA(IP4_9_8, CC50_STATE1), 888 PINMUX_IPSR_DATA(IP4_9_8, CC50_STATE1),
890 PINMUX_IPSR_DATA(IP4_9_8, DU0_DR2), 889 PINMUX_IPSR_DATA(IP4_9_8, DU0_DR2),
891 PINMUX_IPSR_DATA(IP4_9_8, LCDOUT18), 890 PINMUX_IPSR_DATA(IP4_9_8, LCDOUT18),
@@ -907,13 +906,13 @@ static const u16 pinmux_data[] = {
907 PINMUX_IPSR_DATA(IP4_19_18, CC50_STATE7), 906 PINMUX_IPSR_DATA(IP4_19_18, CC50_STATE7),
908 PINMUX_IPSR_DATA(IP4_22_20, DU0_DG0), 907 PINMUX_IPSR_DATA(IP4_22_20, DU0_DG0),
909 PINMUX_IPSR_DATA(IP4_22_20, LCDOUT8), 908 PINMUX_IPSR_DATA(IP4_22_20, LCDOUT8),
910 PINMUX_IPSR_MODSEL_DATA(IP4_22_20, SCIFA0_RXD_C, SEL_SCIFA0_2), 909 PINMUX_IPSR_MSEL(IP4_22_20, SCIFA0_RXD_C, SEL_SCIFA0_2),
911 PINMUX_IPSR_MODSEL_DATA(IP4_22_20, I2C3_SCL_D, SEL_I2C03_3), 910 PINMUX_IPSR_MSEL(IP4_22_20, I2C3_SCL_D, SEL_I2C03_3),
912 PINMUX_IPSR_DATA(IP4_22_20, CC50_STATE8), 911 PINMUX_IPSR_DATA(IP4_22_20, CC50_STATE8),
913 PINMUX_IPSR_DATA(IP4_25_23, DU0_DG1), 912 PINMUX_IPSR_DATA(IP4_25_23, DU0_DG1),
914 PINMUX_IPSR_DATA(IP4_25_23, LCDOUT9), 913 PINMUX_IPSR_DATA(IP4_25_23, LCDOUT9),
915 PINMUX_IPSR_MODSEL_DATA(IP4_25_23, SCIFA0_TXD_C, SEL_SCIFA0_2), 914 PINMUX_IPSR_MSEL(IP4_25_23, SCIFA0_TXD_C, SEL_SCIFA0_2),
916 PINMUX_IPSR_MODSEL_DATA(IP4_25_23, I2C3_SDA_D, SEL_I2C03_3), 915 PINMUX_IPSR_MSEL(IP4_25_23, I2C3_SDA_D, SEL_I2C03_3),
917 PINMUX_IPSR_DATA(IP4_25_23, CC50_STATE9), 916 PINMUX_IPSR_DATA(IP4_25_23, CC50_STATE9),
918 PINMUX_IPSR_DATA(IP4_27_26, DU0_DG2), 917 PINMUX_IPSR_DATA(IP4_27_26, DU0_DG2),
919 PINMUX_IPSR_DATA(IP4_27_26, LCDOUT10), 918 PINMUX_IPSR_DATA(IP4_27_26, LCDOUT10),
@@ -937,15 +936,15 @@ static const u16 pinmux_data[] = {
937 PINMUX_IPSR_DATA(IP5_5_4, CC50_STATE15), 936 PINMUX_IPSR_DATA(IP5_5_4, CC50_STATE15),
938 PINMUX_IPSR_DATA(IP5_8_6, DU0_DB0), 937 PINMUX_IPSR_DATA(IP5_8_6, DU0_DB0),
939 PINMUX_IPSR_DATA(IP5_8_6, LCDOUT0), 938 PINMUX_IPSR_DATA(IP5_8_6, LCDOUT0),
940 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, SCIFA4_RXD_C, SEL_SCIFA4_2), 939 PINMUX_IPSR_MSEL(IP5_8_6, SCIFA4_RXD_C, SEL_SCIFA4_2),
941 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, I2C4_SCL_D, SEL_I2C04_3), 940 PINMUX_IPSR_MSEL(IP5_8_6, I2C4_SCL_D, SEL_I2C04_3),
942 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, CAN0_RX_C, SEL_CAN0_2), 941 PINMUX_IPSR_MSEL(IP7_8_6, CAN0_RX_C, SEL_CAN0_2),
943 PINMUX_IPSR_DATA(IP5_8_6, CC50_STATE16), 942 PINMUX_IPSR_DATA(IP5_8_6, CC50_STATE16),
944 PINMUX_IPSR_DATA(IP5_11_9, DU0_DB1), 943 PINMUX_IPSR_DATA(IP5_11_9, DU0_DB1),
945 PINMUX_IPSR_DATA(IP5_11_9, LCDOUT1), 944 PINMUX_IPSR_DATA(IP5_11_9, LCDOUT1),
946 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2), 945 PINMUX_IPSR_MSEL(IP5_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
947 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, I2C4_SDA_D, SEL_I2C04_3), 946 PINMUX_IPSR_MSEL(IP5_11_9, I2C4_SDA_D, SEL_I2C04_3),
948 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, CAN0_TX_C, SEL_CAN0_2), 947 PINMUX_IPSR_MSEL(IP5_11_9, CAN0_TX_C, SEL_CAN0_2),
949 PINMUX_IPSR_DATA(IP5_11_9, CC50_STATE17), 948 PINMUX_IPSR_DATA(IP5_11_9, CC50_STATE17),
950 PINMUX_IPSR_DATA(IP5_13_12, DU0_DB2), 949 PINMUX_IPSR_DATA(IP5_13_12, DU0_DB2),
951 PINMUX_IPSR_DATA(IP5_13_12, LCDOUT2), 950 PINMUX_IPSR_DATA(IP5_13_12, LCDOUT2),
@@ -1010,501 +1009,501 @@ static const u16 pinmux_data[] = {
1010 PINMUX_IPSR_DATA(IP6_16, VI0_DATA7_VI0_B7), 1009 PINMUX_IPSR_DATA(IP6_16, VI0_DATA7_VI0_B7),
1011 PINMUX_IPSR_DATA(IP6_16, AVB_RXD6), 1010 PINMUX_IPSR_DATA(IP6_16, AVB_RXD6),
1012 PINMUX_IPSR_DATA(IP6_19_17, VI0_CLKENB), 1011 PINMUX_IPSR_DATA(IP6_19_17, VI0_CLKENB),
1013 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, I2C3_SCL, SEL_I2C03_0), 1012 PINMUX_IPSR_MSEL(IP6_19_17, I2C3_SCL, SEL_I2C03_0),
1014 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, SCIFA5_RXD_C, SEL_SCIFA5_2), 1013 PINMUX_IPSR_MSEL(IP6_19_17, SCIFA5_RXD_C, SEL_SCIFA5_2),
1015 PINMUX_IPSR_MODSEL_DATA(IP6_19_17, IETX_C, SEL_IEB_2), 1014 PINMUX_IPSR_MSEL(IP6_19_17, IETX_C, SEL_IEB_2),
1016 PINMUX_IPSR_DATA(IP6_19_17, AVB_RXD7), 1015 PINMUX_IPSR_DATA(IP6_19_17, AVB_RXD7),
1017 PINMUX_IPSR_DATA(IP6_22_20, VI0_FIELD), 1016 PINMUX_IPSR_DATA(IP6_22_20, VI0_FIELD),
1018 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, I2C3_SDA, SEL_I2C03_0), 1017 PINMUX_IPSR_MSEL(IP6_22_20, I2C3_SDA, SEL_I2C03_0),
1019 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCIFA5_TXD_C, SEL_SCIFA5_2), 1018 PINMUX_IPSR_MSEL(IP6_22_20, SCIFA5_TXD_C, SEL_SCIFA5_2),
1020 PINMUX_IPSR_MODSEL_DATA(IP6_22_20, IECLK_C, SEL_IEB_2), 1019 PINMUX_IPSR_MSEL(IP6_22_20, IECLK_C, SEL_IEB_2),
1021 PINMUX_IPSR_DATA(IP6_22_20, AVB_RX_ER), 1020 PINMUX_IPSR_DATA(IP6_22_20, AVB_RX_ER),
1022 PINMUX_IPSR_DATA(IP6_25_23, VI0_HSYNC_N), 1021 PINMUX_IPSR_DATA(IP6_25_23, VI0_HSYNC_N),
1023 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, SCIF0_RXD_B, SEL_SCIF0_1), 1022 PINMUX_IPSR_MSEL(IP6_25_23, SCIF0_RXD_B, SEL_SCIF0_1),
1024 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, I2C0_SCL_C, SEL_I2C00_2), 1023 PINMUX_IPSR_MSEL(IP6_25_23, I2C0_SCL_C, SEL_I2C00_2),
1025 PINMUX_IPSR_MODSEL_DATA(IP6_25_23, IERX_C, SEL_IEB_2), 1024 PINMUX_IPSR_MSEL(IP6_25_23, IERX_C, SEL_IEB_2),
1026 PINMUX_IPSR_DATA(IP6_25_23, AVB_COL), 1025 PINMUX_IPSR_DATA(IP6_25_23, AVB_COL),
1027 PINMUX_IPSR_DATA(IP6_28_26, VI0_VSYNC_N), 1026 PINMUX_IPSR_DATA(IP6_28_26, VI0_VSYNC_N),
1028 PINMUX_IPSR_MODSEL_DATA(IP6_28_26, SCIF0_TXD_B, SEL_SCIF0_1), 1027 PINMUX_IPSR_MSEL(IP6_28_26, SCIF0_TXD_B, SEL_SCIF0_1),
1029 PINMUX_IPSR_MODSEL_DATA(IP6_28_26, I2C0_SDA_C, SEL_I2C00_2), 1028 PINMUX_IPSR_MSEL(IP6_28_26, I2C0_SDA_C, SEL_I2C00_2),
1030 PINMUX_IPSR_MODSEL_DATA(IP6_28_26, AUDIO_CLKOUT_B, SEL_ADG_1), 1029 PINMUX_IPSR_MSEL(IP6_28_26, AUDIO_CLKOUT_B, SEL_ADG_1),
1031 PINMUX_IPSR_DATA(IP6_28_26, AVB_TX_EN), 1030 PINMUX_IPSR_DATA(IP6_28_26, AVB_TX_EN),
1032 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, ETH_MDIO, SEL_ETH_0), 1031 PINMUX_IPSR_MSEL(IP6_31_29, ETH_MDIO, SEL_ETH_0),
1033 PINMUX_IPSR_DATA(IP6_31_29, VI0_G0), 1032 PINMUX_IPSR_DATA(IP6_31_29, VI0_G0),
1034 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, MSIOF2_RXD_B, SEL_MSI2_1), 1033 PINMUX_IPSR_MSEL(IP6_31_29, MSIOF2_RXD_B, SEL_MSI2_1),
1035 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, IIC0_SCL_D, SEL_IIC00_3), 1034 PINMUX_IPSR_MSEL(IP6_31_29, IIC0_SCL_D, SEL_IIC00_3),
1036 PINMUX_IPSR_DATA(IP6_31_29, AVB_TX_CLK), 1035 PINMUX_IPSR_DATA(IP6_31_29, AVB_TX_CLK),
1037 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, ADIDATA, SEL_RAD_0), 1036 PINMUX_IPSR_MSEL(IP6_31_29, ADIDATA, SEL_RAD_0),
1038 PINMUX_IPSR_MODSEL_DATA(IP6_31_29, AD_DI, SEL_ADI_0), 1037 PINMUX_IPSR_MSEL(IP6_31_29, AD_DI, SEL_ADI_0),
1039 1038
1040 /* IPSR7 */ 1039 /* IPSR7 */
1041 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, ETH_CRS_DV, SEL_ETH_0), 1040 PINMUX_IPSR_MSEL(IP7_2_0, ETH_CRS_DV, SEL_ETH_0),
1042 PINMUX_IPSR_DATA(IP7_2_0, VI0_G1), 1041 PINMUX_IPSR_DATA(IP7_2_0, VI0_G1),
1043 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, MSIOF2_TXD_B, SEL_MSI2_1), 1042 PINMUX_IPSR_MSEL(IP7_2_0, MSIOF2_TXD_B, SEL_MSI2_1),
1044 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, IIC0_SDA_D, SEL_IIC00_3), 1043 PINMUX_IPSR_MSEL(IP7_2_0, IIC0_SDA_D, SEL_IIC00_3),
1045 PINMUX_IPSR_DATA(IP7_2_0, AVB_TXD0), 1044 PINMUX_IPSR_DATA(IP7_2_0, AVB_TXD0),
1046 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, ADICS_SAMP, SEL_RAD_0), 1045 PINMUX_IPSR_MSEL(IP7_2_0, ADICS_SAMP, SEL_RAD_0),
1047 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, AD_DO, SEL_ADI_0), 1046 PINMUX_IPSR_MSEL(IP7_2_0, AD_DO, SEL_ADI_0),
1048 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, ETH_RX_ER, SEL_ETH_0), 1047 PINMUX_IPSR_MSEL(IP7_5_3, ETH_RX_ER, SEL_ETH_0),
1049 PINMUX_IPSR_DATA(IP7_5_3, VI0_G2), 1048 PINMUX_IPSR_DATA(IP7_5_3, VI0_G2),
1050 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, MSIOF2_SCK_B, SEL_MSI2_1), 1049 PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_MSI2_1),
1051 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, CAN0_RX_B, SEL_CAN0_1), 1050 PINMUX_IPSR_MSEL(IP7_5_3, CAN0_RX_B, SEL_CAN0_1),
1052 PINMUX_IPSR_DATA(IP7_5_3, AVB_TXD1), 1051 PINMUX_IPSR_DATA(IP7_5_3, AVB_TXD1),
1053 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, ADICLK, SEL_RAD_0), 1052 PINMUX_IPSR_MSEL(IP7_5_3, ADICLK, SEL_RAD_0),
1054 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, AD_CLK, SEL_ADI_0), 1053 PINMUX_IPSR_MSEL(IP7_5_3, AD_CLK, SEL_ADI_0),
1055 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, ETH_RXD0, SEL_ETH_0), 1054 PINMUX_IPSR_MSEL(IP7_8_6, ETH_RXD0, SEL_ETH_0),
1056 PINMUX_IPSR_DATA(IP7_8_6, VI0_G3), 1055 PINMUX_IPSR_DATA(IP7_8_6, VI0_G3),
1057 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, MSIOF2_SYNC_B, SEL_MSI2_1), 1056 PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_MSI2_1),
1058 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, CAN0_TX_B, SEL_CAN0_1), 1057 PINMUX_IPSR_MSEL(IP7_8_6, CAN0_TX_B, SEL_CAN0_1),
1059 PINMUX_IPSR_DATA(IP7_8_6, AVB_TXD2), 1058 PINMUX_IPSR_DATA(IP7_8_6, AVB_TXD2),
1060 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, ADICHS0, SEL_RAD_0), 1059 PINMUX_IPSR_MSEL(IP7_8_6, ADICHS0, SEL_RAD_0),
1061 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, AD_NCS_N, SEL_ADI_0), 1060 PINMUX_IPSR_MSEL(IP7_8_6, AD_NCS_N, SEL_ADI_0),
1062 PINMUX_IPSR_MODSEL_DATA(IP7_11_9, ETH_RXD1, SEL_ETH_0), 1061 PINMUX_IPSR_MSEL(IP7_11_9, ETH_RXD1, SEL_ETH_0),
1063 PINMUX_IPSR_DATA(IP7_11_9, VI0_G4), 1062 PINMUX_IPSR_DATA(IP7_11_9, VI0_G4),
1064 PINMUX_IPSR_MODSEL_DATA(IP7_11_9, MSIOF2_SS1_B, SEL_MSI2_1), 1063 PINMUX_IPSR_MSEL(IP7_11_9, MSIOF2_SS1_B, SEL_MSI2_1),
1065 PINMUX_IPSR_MODSEL_DATA(IP7_11_9, SCIF4_RXD_D, SEL_SCIF4_3), 1064 PINMUX_IPSR_MSEL(IP7_11_9, SCIF4_RXD_D, SEL_SCIF4_3),
1066 PINMUX_IPSR_DATA(IP7_11_9, AVB_TXD3), 1065 PINMUX_IPSR_DATA(IP7_11_9, AVB_TXD3),
1067 PINMUX_IPSR_MODSEL_DATA(IP7_11_9, ADICHS1, SEL_RAD_0), 1066 PINMUX_IPSR_MSEL(IP7_11_9, ADICHS1, SEL_RAD_0),
1068 PINMUX_IPSR_MODSEL_DATA(IP7_14_12, ETH_LINK, SEL_ETH_0), 1067 PINMUX_IPSR_MSEL(IP7_14_12, ETH_LINK, SEL_ETH_0),
1069 PINMUX_IPSR_DATA(IP7_14_12, VI0_G5), 1068 PINMUX_IPSR_DATA(IP7_14_12, VI0_G5),
1070 PINMUX_IPSR_MODSEL_DATA(IP7_14_12, MSIOF2_SS2_B, SEL_MSI2_1), 1069 PINMUX_IPSR_MSEL(IP7_14_12, MSIOF2_SS2_B, SEL_MSI2_1),
1071 PINMUX_IPSR_MODSEL_DATA(IP7_14_12, SCIF4_TXD_D, SEL_SCIF4_3), 1070 PINMUX_IPSR_MSEL(IP7_14_12, SCIF4_TXD_D, SEL_SCIF4_3),
1072 PINMUX_IPSR_DATA(IP7_14_12, AVB_TXD4), 1071 PINMUX_IPSR_DATA(IP7_14_12, AVB_TXD4),
1073 PINMUX_IPSR_MODSEL_DATA(IP7_14_12, ADICHS2, SEL_RAD_0), 1072 PINMUX_IPSR_MSEL(IP7_14_12, ADICHS2, SEL_RAD_0),
1074 PINMUX_IPSR_MODSEL_DATA(IP7_17_15, ETH_REFCLK, SEL_ETH_0), 1073 PINMUX_IPSR_MSEL(IP7_17_15, ETH_REFCLK, SEL_ETH_0),
1075 PINMUX_IPSR_DATA(IP7_17_15, VI0_G6), 1074 PINMUX_IPSR_DATA(IP7_17_15, VI0_G6),
1076 PINMUX_IPSR_MODSEL_DATA(IP7_17_15, SCIF2_SCK_C, SEL_SCIF2_2), 1075 PINMUX_IPSR_MSEL(IP7_17_15, SCIF2_SCK_C, SEL_SCIF2_2),
1077 PINMUX_IPSR_DATA(IP7_17_15, AVB_TXD5), 1076 PINMUX_IPSR_DATA(IP7_17_15, AVB_TXD5),
1078 PINMUX_IPSR_MODSEL_DATA(IP7_17_15, SSI_SCK5_B, SEL_SSI5_1), 1077 PINMUX_IPSR_MSEL(IP7_17_15, SSI_SCK5_B, SEL_SSI5_1),
1079 PINMUX_IPSR_MODSEL_DATA(IP7_20_18, ETH_TXD1, SEL_ETH_0), 1078 PINMUX_IPSR_MSEL(IP7_20_18, ETH_TXD1, SEL_ETH_0),
1080 PINMUX_IPSR_DATA(IP7_20_18, VI0_G7), 1079 PINMUX_IPSR_DATA(IP7_20_18, VI0_G7),
1081 PINMUX_IPSR_MODSEL_DATA(IP7_20_18, SCIF2_RXD_C, SEL_SCIF2_2), 1080 PINMUX_IPSR_MSEL(IP7_20_18, SCIF2_RXD_C, SEL_SCIF2_2),
1082 PINMUX_IPSR_MODSEL_DATA(IP7_20_18, IIC1_SCL_D, SEL_IIC01_3), 1081 PINMUX_IPSR_MSEL(IP7_20_18, IIC1_SCL_D, SEL_IIC01_3),
1083 PINMUX_IPSR_DATA(IP7_20_18, AVB_TXD6), 1082 PINMUX_IPSR_DATA(IP7_20_18, AVB_TXD6),
1084 PINMUX_IPSR_MODSEL_DATA(IP7_20_18, SSI_WS5_B, SEL_SSI5_1), 1083 PINMUX_IPSR_MSEL(IP7_20_18, SSI_WS5_B, SEL_SSI5_1),
1085 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, ETH_TX_EN, SEL_ETH_0), 1084 PINMUX_IPSR_MSEL(IP7_23_21, ETH_TX_EN, SEL_ETH_0),
1086 PINMUX_IPSR_DATA(IP7_23_21, VI0_R0), 1085 PINMUX_IPSR_DATA(IP7_23_21, VI0_R0),
1087 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, SCIF2_TXD_C, SEL_SCIF2_2), 1086 PINMUX_IPSR_MSEL(IP7_23_21, SCIF2_TXD_C, SEL_SCIF2_2),
1088 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, IIC1_SDA_D, SEL_IIC01_3), 1087 PINMUX_IPSR_MSEL(IP7_23_21, IIC1_SDA_D, SEL_IIC01_3),
1089 PINMUX_IPSR_DATA(IP7_23_21, AVB_TXD7), 1088 PINMUX_IPSR_DATA(IP7_23_21, AVB_TXD7),
1090 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, SSI_SDATA5_B, SEL_SSI5_1), 1089 PINMUX_IPSR_MSEL(IP7_23_21, SSI_SDATA5_B, SEL_SSI5_1),
1091 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, ETH_MAGIC, SEL_ETH_0), 1090 PINMUX_IPSR_MSEL(IP7_26_24, ETH_MAGIC, SEL_ETH_0),
1092 PINMUX_IPSR_DATA(IP7_26_24, VI0_R1), 1091 PINMUX_IPSR_DATA(IP7_26_24, VI0_R1),
1093 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, SCIF3_SCK_B, SEL_SCIF3_1), 1092 PINMUX_IPSR_MSEL(IP7_26_24, SCIF3_SCK_B, SEL_SCIF3_1),
1094 PINMUX_IPSR_DATA(IP7_26_24, AVB_TX_ER), 1093 PINMUX_IPSR_DATA(IP7_26_24, AVB_TX_ER),
1095 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, SSI_SCK6_B, SEL_SSI6_1), 1094 PINMUX_IPSR_MSEL(IP7_26_24, SSI_SCK6_B, SEL_SSI6_1),
1096 PINMUX_IPSR_MODSEL_DATA(IP7_29_27, ETH_TXD0, SEL_ETH_0), 1095 PINMUX_IPSR_MSEL(IP7_29_27, ETH_TXD0, SEL_ETH_0),
1097 PINMUX_IPSR_DATA(IP7_29_27, VI0_R2), 1096 PINMUX_IPSR_DATA(IP7_29_27, VI0_R2),
1098 PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SCIF3_RXD_B, SEL_SCIF3_1), 1097 PINMUX_IPSR_MSEL(IP7_29_27, SCIF3_RXD_B, SEL_SCIF3_1),
1099 PINMUX_IPSR_MODSEL_DATA(IP7_29_27, I2C4_SCL_E, SEL_I2C04_4), 1098 PINMUX_IPSR_MSEL(IP7_29_27, I2C4_SCL_E, SEL_I2C04_4),
1100 PINMUX_IPSR_DATA(IP7_29_27, AVB_GTX_CLK), 1099 PINMUX_IPSR_DATA(IP7_29_27, AVB_GTX_CLK),
1101 PINMUX_IPSR_MODSEL_DATA(IP7_29_27, SSI_WS6_B, SEL_SSI6_1), 1100 PINMUX_IPSR_MSEL(IP7_29_27, SSI_WS6_B, SEL_SSI6_1),
1102 PINMUX_IPSR_DATA(IP7_31, DREQ0_N), 1101 PINMUX_IPSR_DATA(IP7_31, DREQ0_N),
1103 PINMUX_IPSR_DATA(IP7_31, SCIFB1_RXD), 1102 PINMUX_IPSR_DATA(IP7_31, SCIFB1_RXD),
1104 1103
1105 /* IPSR8 */ 1104 /* IPSR8 */
1106 PINMUX_IPSR_MODSEL_DATA(IP8_2_0, ETH_MDC, SEL_ETH_0), 1105 PINMUX_IPSR_MSEL(IP8_2_0, ETH_MDC, SEL_ETH_0),
1107 PINMUX_IPSR_DATA(IP8_2_0, VI0_R3), 1106 PINMUX_IPSR_DATA(IP8_2_0, VI0_R3),
1108 PINMUX_IPSR_MODSEL_DATA(IP8_2_0, SCIF3_TXD_B, SEL_SCIF3_1), 1107 PINMUX_IPSR_MSEL(IP8_2_0, SCIF3_TXD_B, SEL_SCIF3_1),
1109 PINMUX_IPSR_MODSEL_DATA(IP8_2_0, I2C4_SDA_E, SEL_I2C04_4), 1108 PINMUX_IPSR_MSEL(IP8_2_0, I2C4_SDA_E, SEL_I2C04_4),
1110 PINMUX_IPSR_DATA(IP8_2_0, AVB_MDC), 1109 PINMUX_IPSR_DATA(IP8_2_0, AVB_MDC),
1111 PINMUX_IPSR_MODSEL_DATA(IP8_2_0, SSI_SDATA6_B, SEL_SSI6_1), 1110 PINMUX_IPSR_MSEL(IP8_2_0, SSI_SDATA6_B, SEL_SSI6_1),
1112 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, HSCIF0_HRX, SEL_HSCIF0_0), 1111 PINMUX_IPSR_MSEL(IP8_5_3, HSCIF0_HRX, SEL_HSCIF0_0),
1113 PINMUX_IPSR_DATA(IP8_5_3, VI0_R4), 1112 PINMUX_IPSR_DATA(IP8_5_3, VI0_R4),
1114 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, I2C1_SCL_C, SEL_I2C01_2), 1113 PINMUX_IPSR_MSEL(IP8_5_3, I2C1_SCL_C, SEL_I2C01_2),
1115 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, AUDIO_CLKA_B, SEL_ADG_1), 1114 PINMUX_IPSR_MSEL(IP8_5_3, AUDIO_CLKA_B, SEL_ADG_1),
1116 PINMUX_IPSR_DATA(IP8_5_3, AVB_MDIO), 1115 PINMUX_IPSR_DATA(IP8_5_3, AVB_MDIO),
1117 PINMUX_IPSR_MODSEL_DATA(IP8_5_3, SSI_SCK78_B, SEL_SSI7_1), 1116 PINMUX_IPSR_MSEL(IP8_5_3, SSI_SCK78_B, SEL_SSI7_1),
1118 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, HSCIF0_HTX, SEL_HSCIF0_0), 1117 PINMUX_IPSR_MSEL(IP8_8_6, HSCIF0_HTX, SEL_HSCIF0_0),
1119 PINMUX_IPSR_DATA(IP8_8_6, VI0_R5), 1118 PINMUX_IPSR_DATA(IP8_8_6, VI0_R5),
1120 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, I2C1_SDA_C, SEL_I2C01_2), 1119 PINMUX_IPSR_MSEL(IP8_8_6, I2C1_SDA_C, SEL_I2C01_2),
1121 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, AUDIO_CLKB_B, SEL_ADG_1), 1120 PINMUX_IPSR_MSEL(IP8_8_6, AUDIO_CLKB_B, SEL_ADG_1),
1122 PINMUX_IPSR_DATA(IP8_5_3, AVB_LINK), 1121 PINMUX_IPSR_DATA(IP8_5_3, AVB_LINK),
1123 PINMUX_IPSR_MODSEL_DATA(IP8_8_6, SSI_WS78_B, SEL_SSI7_1), 1122 PINMUX_IPSR_MSEL(IP8_8_6, SSI_WS78_B, SEL_SSI7_1),
1124 PINMUX_IPSR_DATA(IP8_11_9, HSCIF0_HCTS_N), 1123 PINMUX_IPSR_DATA(IP8_11_9, HSCIF0_HCTS_N),
1125 PINMUX_IPSR_DATA(IP8_11_9, VI0_R6), 1124 PINMUX_IPSR_DATA(IP8_11_9, VI0_R6),
1126 PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SCIF0_RXD_D, SEL_SCIF0_3), 1125 PINMUX_IPSR_MSEL(IP8_11_9, SCIF0_RXD_D, SEL_SCIF0_3),
1127 PINMUX_IPSR_MODSEL_DATA(IP8_11_9, I2C0_SCL_E, SEL_I2C00_4), 1126 PINMUX_IPSR_MSEL(IP8_11_9, I2C0_SCL_E, SEL_I2C00_4),
1128 PINMUX_IPSR_DATA(IP8_11_9, AVB_MAGIC), 1127 PINMUX_IPSR_DATA(IP8_11_9, AVB_MAGIC),
1129 PINMUX_IPSR_MODSEL_DATA(IP8_11_9, SSI_SDATA7_B, SEL_SSI7_1), 1128 PINMUX_IPSR_MSEL(IP8_11_9, SSI_SDATA7_B, SEL_SSI7_1),
1130 PINMUX_IPSR_DATA(IP8_14_12, HSCIF0_HRTS_N), 1129 PINMUX_IPSR_DATA(IP8_14_12, HSCIF0_HRTS_N),
1131 PINMUX_IPSR_DATA(IP8_14_12, VI0_R7), 1130 PINMUX_IPSR_DATA(IP8_14_12, VI0_R7),
1132 PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SCIF0_TXD_D, SEL_SCIF0_3), 1131 PINMUX_IPSR_MSEL(IP8_14_12, SCIF0_TXD_D, SEL_SCIF0_3),
1133 PINMUX_IPSR_MODSEL_DATA(IP8_14_12, I2C0_SDA_E, SEL_I2C00_4), 1132 PINMUX_IPSR_MSEL(IP8_14_12, I2C0_SDA_E, SEL_I2C00_4),
1134 PINMUX_IPSR_DATA(IP8_14_12, AVB_PHY_INT), 1133 PINMUX_IPSR_DATA(IP8_14_12, AVB_PHY_INT),
1135 PINMUX_IPSR_MODSEL_DATA(IP8_14_12, SSI_SDATA8_B, SEL_SSI8_1), 1134 PINMUX_IPSR_MSEL(IP8_14_12, SSI_SDATA8_B, SEL_SSI8_1),
1136 PINMUX_IPSR_MODSEL_DATA(IP8_16_15, HSCIF0_HSCK, SEL_HSCIF0_0), 1135 PINMUX_IPSR_MSEL(IP8_16_15, HSCIF0_HSCK, SEL_HSCIF0_0),
1137 PINMUX_IPSR_MODSEL_DATA(IP8_16_15, SCIF_CLK_B, SEL_SCIF0_1), 1136 PINMUX_IPSR_MSEL(IP8_16_15, SCIF_CLK_B, SEL_SCIF0_1),
1138 PINMUX_IPSR_DATA(IP8_16_15, AVB_CRS), 1137 PINMUX_IPSR_DATA(IP8_16_15, AVB_CRS),
1139 PINMUX_IPSR_MODSEL_DATA(IP8_16_15, AUDIO_CLKC_B, SEL_ADG_1), 1138 PINMUX_IPSR_MSEL(IP8_16_15, AUDIO_CLKC_B, SEL_ADG_1),
1140 PINMUX_IPSR_MODSEL_DATA(IP8_19_17, I2C0_SCL, SEL_I2C00_0), 1139 PINMUX_IPSR_MSEL(IP8_19_17, I2C0_SCL, SEL_I2C00_0),
1141 PINMUX_IPSR_MODSEL_DATA(IP8_19_17, SCIF0_RXD_C, SEL_SCIF0_2), 1140 PINMUX_IPSR_MSEL(IP8_19_17, SCIF0_RXD_C, SEL_SCIF0_2),
1142 PINMUX_IPSR_DATA(IP8_19_17, PWM5), 1141 PINMUX_IPSR_DATA(IP8_19_17, PWM5),
1143 PINMUX_IPSR_MODSEL_DATA(IP8_19_17, TCLK1_B, SEL_TMU_1), 1142 PINMUX_IPSR_MSEL(IP8_19_17, TCLK1_B, SEL_TMU_1),
1144 PINMUX_IPSR_DATA(IP8_19_17, AVB_GTXREFCLK), 1143 PINMUX_IPSR_DATA(IP8_19_17, AVB_GTXREFCLK),
1145 PINMUX_IPSR_MODSEL_DATA(IP8_19_17, CAN1_RX_D, SEL_CAN1_3), 1144 PINMUX_IPSR_MSEL(IP8_19_17, CAN1_RX_D, SEL_CAN1_3),
1146 PINMUX_IPSR_DATA(IP8_19_17, TPUTO0_B), 1145 PINMUX_IPSR_DATA(IP8_19_17, TPUTO0_B),
1147 PINMUX_IPSR_MODSEL_DATA(IP8_22_20, I2C0_SDA, SEL_I2C00_0), 1146 PINMUX_IPSR_MSEL(IP8_22_20, I2C0_SDA, SEL_I2C00_0),
1148 PINMUX_IPSR_MODSEL_DATA(IP8_22_20, SCIF0_TXD_C, SEL_SCIF0_2), 1147 PINMUX_IPSR_MSEL(IP8_22_20, SCIF0_TXD_C, SEL_SCIF0_2),
1149 PINMUX_IPSR_DATA(IP8_22_20, TPUTO0), 1148 PINMUX_IPSR_DATA(IP8_22_20, TPUTO0),
1150 PINMUX_IPSR_MODSEL_DATA(IP8_22_20, CAN_CLK, SEL_CAN_0), 1149 PINMUX_IPSR_MSEL(IP8_22_20, CAN_CLK, SEL_CAN_0),
1151 PINMUX_IPSR_DATA(IP8_22_20, DVC_MUTE), 1150 PINMUX_IPSR_DATA(IP8_22_20, DVC_MUTE),
1152 PINMUX_IPSR_MODSEL_DATA(IP8_22_20, CAN1_TX_D, SEL_CAN1_3), 1151 PINMUX_IPSR_MSEL(IP8_22_20, CAN1_TX_D, SEL_CAN1_3),
1153 PINMUX_IPSR_MODSEL_DATA(IP8_25_23, I2C1_SCL, SEL_I2C01_0), 1152 PINMUX_IPSR_MSEL(IP8_25_23, I2C1_SCL, SEL_I2C01_0),
1154 PINMUX_IPSR_MODSEL_DATA(IP8_25_23, SCIF4_RXD, SEL_SCIF4_0), 1153 PINMUX_IPSR_MSEL(IP8_25_23, SCIF4_RXD, SEL_SCIF4_0),
1155 PINMUX_IPSR_DATA(IP8_25_23, PWM5_B), 1154 PINMUX_IPSR_DATA(IP8_25_23, PWM5_B),
1156 PINMUX_IPSR_DATA(IP8_25_23, DU1_DR0), 1155 PINMUX_IPSR_DATA(IP8_25_23, DU1_DR0),
1157 PINMUX_IPSR_MODSEL_DATA(IP8_25_23, RIF1_SYNC_B, SEL_DR2_1), 1156 PINMUX_IPSR_MSEL(IP8_25_23, RIF1_SYNC_B, SEL_DR2_1),
1158 PINMUX_IPSR_MODSEL_DATA(IP8_25_23, TS_SDATA_D, SEL_TSIF0_3), 1157 PINMUX_IPSR_MSEL(IP8_25_23, TS_SDATA_D, SEL_TSIF0_3),
1159 PINMUX_IPSR_DATA(IP8_25_23, TPUTO1_B), 1158 PINMUX_IPSR_DATA(IP8_25_23, TPUTO1_B),
1160 PINMUX_IPSR_MODSEL_DATA(IP8_28_26, I2C1_SDA, SEL_I2C01_0), 1159 PINMUX_IPSR_MSEL(IP8_28_26, I2C1_SDA, SEL_I2C01_0),
1161 PINMUX_IPSR_MODSEL_DATA(IP8_28_26, SCIF4_TXD, SEL_SCIF4_0), 1160 PINMUX_IPSR_MSEL(IP8_28_26, SCIF4_TXD, SEL_SCIF4_0),
1162 PINMUX_IPSR_DATA(IP8_28_26, IRQ5), 1161 PINMUX_IPSR_DATA(IP8_28_26, IRQ5),
1163 PINMUX_IPSR_DATA(IP8_28_26, DU1_DR1), 1162 PINMUX_IPSR_DATA(IP8_28_26, DU1_DR1),
1164 PINMUX_IPSR_MODSEL_DATA(IP8_28_26, RIF1_CLK_B, SEL_DR2_1), 1163 PINMUX_IPSR_MSEL(IP8_28_26, RIF1_CLK_B, SEL_DR2_1),
1165 PINMUX_IPSR_MODSEL_DATA(IP8_28_26, TS_SCK_D, SEL_TSIF0_3), 1164 PINMUX_IPSR_MSEL(IP8_28_26, TS_SCK_D, SEL_TSIF0_3),
1166 PINMUX_IPSR_MODSEL_DATA(IP8_28_26, BPFCLK_C, SEL_DARC_2), 1165 PINMUX_IPSR_MSEL(IP8_28_26, BPFCLK_C, SEL_DARC_2),
1167 PINMUX_IPSR_DATA(IP8_31_29, MSIOF0_RXD), 1166 PINMUX_IPSR_DATA(IP8_31_29, MSIOF0_RXD),
1168 PINMUX_IPSR_MODSEL_DATA(IP8_31_29, SCIF5_RXD, SEL_SCIF5_0), 1167 PINMUX_IPSR_MSEL(IP8_31_29, SCIF5_RXD, SEL_SCIF5_0),
1169 PINMUX_IPSR_MODSEL_DATA(IP8_31_29, I2C2_SCL_C, SEL_I2C02_2), 1168 PINMUX_IPSR_MSEL(IP8_31_29, I2C2_SCL_C, SEL_I2C02_2),
1170 PINMUX_IPSR_DATA(IP8_31_29, DU1_DR2), 1169 PINMUX_IPSR_DATA(IP8_31_29, DU1_DR2),
1171 PINMUX_IPSR_MODSEL_DATA(IP8_31_29, RIF1_D0_B, SEL_DR2_1), 1170 PINMUX_IPSR_MSEL(IP8_31_29, RIF1_D0_B, SEL_DR2_1),
1172 PINMUX_IPSR_MODSEL_DATA(IP8_31_29, TS_SDEN_D, SEL_TSIF0_3), 1171 PINMUX_IPSR_MSEL(IP8_31_29, TS_SDEN_D, SEL_TSIF0_3),
1173 PINMUX_IPSR_MODSEL_DATA(IP8_31_29, FMCLK_C, SEL_DARC_2), 1172 PINMUX_IPSR_MSEL(IP8_31_29, FMCLK_C, SEL_DARC_2),
1174 PINMUX_IPSR_MODSEL_DATA(IP8_31_29, RDS_CLK, SEL_RDS_0), 1173 PINMUX_IPSR_MSEL(IP8_31_29, RDS_CLK, SEL_RDS_0),
1175 1174
1176 /* IPSR9 */ 1175 /* IPSR9 */
1177 PINMUX_IPSR_DATA(IP9_2_0, MSIOF0_TXD), 1176 PINMUX_IPSR_DATA(IP9_2_0, MSIOF0_TXD),
1178 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, SCIF5_TXD, SEL_SCIF5_0), 1177 PINMUX_IPSR_MSEL(IP9_2_0, SCIF5_TXD, SEL_SCIF5_0),
1179 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, I2C2_SDA_C, SEL_I2C02_2), 1178 PINMUX_IPSR_MSEL(IP9_2_0, I2C2_SDA_C, SEL_I2C02_2),
1180 PINMUX_IPSR_DATA(IP9_2_0, DU1_DR3), 1179 PINMUX_IPSR_DATA(IP9_2_0, DU1_DR3),
1181 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, RIF1_D1_B, SEL_DR3_1), 1180 PINMUX_IPSR_MSEL(IP9_2_0, RIF1_D1_B, SEL_DR3_1),
1182 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, TS_SPSYNC_D, SEL_TSIF0_3), 1181 PINMUX_IPSR_MSEL(IP9_2_0, TS_SPSYNC_D, SEL_TSIF0_3),
1183 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, FMIN_C, SEL_DARC_2), 1182 PINMUX_IPSR_MSEL(IP9_2_0, FMIN_C, SEL_DARC_2),
1184 PINMUX_IPSR_MODSEL_DATA(IP9_2_0, RDS_DATA, SEL_RDS_0), 1183 PINMUX_IPSR_MSEL(IP9_2_0, RDS_DATA, SEL_RDS_0),
1185 PINMUX_IPSR_DATA(IP9_5_3, MSIOF0_SCK), 1184 PINMUX_IPSR_DATA(IP9_5_3, MSIOF0_SCK),
1186 PINMUX_IPSR_DATA(IP9_5_3, IRQ0), 1185 PINMUX_IPSR_DATA(IP9_5_3, IRQ0),
1187 PINMUX_IPSR_MODSEL_DATA(IP9_5_3, TS_SDATA, SEL_TSIF0_0), 1186 PINMUX_IPSR_MSEL(IP9_5_3, TS_SDATA, SEL_TSIF0_0),
1188 PINMUX_IPSR_DATA(IP9_5_3, DU1_DR4), 1187 PINMUX_IPSR_DATA(IP9_5_3, DU1_DR4),
1189 PINMUX_IPSR_MODSEL_DATA(IP9_5_3, RIF1_SYNC, SEL_DR2_0), 1188 PINMUX_IPSR_MSEL(IP9_5_3, RIF1_SYNC, SEL_DR2_0),
1190 PINMUX_IPSR_DATA(IP9_5_3, TPUTO1_C), 1189 PINMUX_IPSR_DATA(IP9_5_3, TPUTO1_C),
1191 PINMUX_IPSR_DATA(IP9_8_6, MSIOF0_SYNC), 1190 PINMUX_IPSR_DATA(IP9_8_6, MSIOF0_SYNC),
1192 PINMUX_IPSR_DATA(IP9_8_6, PWM1), 1191 PINMUX_IPSR_DATA(IP9_8_6, PWM1),
1193 PINMUX_IPSR_MODSEL_DATA(IP9_8_6, TS_SCK, SEL_TSIF0_0), 1192 PINMUX_IPSR_MSEL(IP9_8_6, TS_SCK, SEL_TSIF0_0),
1194 PINMUX_IPSR_DATA(IP9_8_6, DU1_DR5), 1193 PINMUX_IPSR_DATA(IP9_8_6, DU1_DR5),
1195 PINMUX_IPSR_MODSEL_DATA(IP9_8_6, RIF1_CLK, SEL_DR2_0), 1194 PINMUX_IPSR_MSEL(IP9_8_6, RIF1_CLK, SEL_DR2_0),
1196 PINMUX_IPSR_MODSEL_DATA(IP9_8_6, BPFCLK_B, SEL_DARC_1), 1195 PINMUX_IPSR_MSEL(IP9_8_6, BPFCLK_B, SEL_DARC_1),
1197 PINMUX_IPSR_DATA(IP9_11_9, MSIOF0_SS1), 1196 PINMUX_IPSR_DATA(IP9_11_9, MSIOF0_SS1),
1198 PINMUX_IPSR_MODSEL_DATA(IP9_11_9, SCIFA0_RXD, SEL_SCIFA0_0), 1197 PINMUX_IPSR_MSEL(IP9_11_9, SCIFA0_RXD, SEL_SCIFA0_0),
1199 PINMUX_IPSR_MODSEL_DATA(IP9_11_9, TS_SDEN, SEL_TSIF0_0), 1198 PINMUX_IPSR_MSEL(IP9_11_9, TS_SDEN, SEL_TSIF0_0),
1200 PINMUX_IPSR_DATA(IP9_11_9, DU1_DR6), 1199 PINMUX_IPSR_DATA(IP9_11_9, DU1_DR6),
1201 PINMUX_IPSR_MODSEL_DATA(IP9_11_9, RIF1_D0, SEL_DR2_0), 1200 PINMUX_IPSR_MSEL(IP9_11_9, RIF1_D0, SEL_DR2_0),
1202 PINMUX_IPSR_MODSEL_DATA(IP9_11_9, FMCLK_B, SEL_DARC_1), 1201 PINMUX_IPSR_MSEL(IP9_11_9, FMCLK_B, SEL_DARC_1),
1203 PINMUX_IPSR_MODSEL_DATA(IP9_11_9, RDS_CLK_B, SEL_RDS_1), 1202 PINMUX_IPSR_MSEL(IP9_11_9, RDS_CLK_B, SEL_RDS_1),
1204 PINMUX_IPSR_DATA(IP9_14_12, MSIOF0_SS2), 1203 PINMUX_IPSR_DATA(IP9_14_12, MSIOF0_SS2),
1205 PINMUX_IPSR_MODSEL_DATA(IP9_14_12, SCIFA0_TXD, SEL_SCIFA0_0), 1204 PINMUX_IPSR_MSEL(IP9_14_12, SCIFA0_TXD, SEL_SCIFA0_0),
1206 PINMUX_IPSR_MODSEL_DATA(IP9_14_12, TS_SPSYNC, SEL_TSIF0_0), 1205 PINMUX_IPSR_MSEL(IP9_14_12, TS_SPSYNC, SEL_TSIF0_0),
1207 PINMUX_IPSR_DATA(IP9_14_12, DU1_DR7), 1206 PINMUX_IPSR_DATA(IP9_14_12, DU1_DR7),
1208 PINMUX_IPSR_MODSEL_DATA(IP9_14_12, RIF1_D1, SEL_DR3_0), 1207 PINMUX_IPSR_MSEL(IP9_14_12, RIF1_D1, SEL_DR3_0),
1209 PINMUX_IPSR_MODSEL_DATA(IP9_14_12, FMIN_B, SEL_DARC_1), 1208 PINMUX_IPSR_MSEL(IP9_14_12, FMIN_B, SEL_DARC_1),
1210 PINMUX_IPSR_MODSEL_DATA(IP9_14_12, RDS_DATA_B, SEL_RDS_1), 1209 PINMUX_IPSR_MSEL(IP9_14_12, RDS_DATA_B, SEL_RDS_1),
1211 PINMUX_IPSR_MODSEL_DATA(IP9_16_15, HSCIF1_HRX, SEL_HSCIF1_0), 1210 PINMUX_IPSR_MSEL(IP9_16_15, HSCIF1_HRX, SEL_HSCIF1_0),
1212 PINMUX_IPSR_MODSEL_DATA(IP9_16_15, I2C4_SCL, SEL_I2C04_0), 1211 PINMUX_IPSR_MSEL(IP9_16_15, I2C4_SCL, SEL_I2C04_0),
1213 PINMUX_IPSR_DATA(IP9_16_15, PWM6), 1212 PINMUX_IPSR_DATA(IP9_16_15, PWM6),
1214 PINMUX_IPSR_DATA(IP9_16_15, DU1_DG0), 1213 PINMUX_IPSR_DATA(IP9_16_15, DU1_DG0),
1215 PINMUX_IPSR_MODSEL_DATA(IP9_18_17, HSCIF1_HTX, SEL_HSCIF1_0), 1214 PINMUX_IPSR_MSEL(IP9_18_17, HSCIF1_HTX, SEL_HSCIF1_0),
1216 PINMUX_IPSR_MODSEL_DATA(IP9_18_17, I2C4_SDA, SEL_I2C04_0), 1215 PINMUX_IPSR_MSEL(IP9_18_17, I2C4_SDA, SEL_I2C04_0),
1217 PINMUX_IPSR_DATA(IP9_18_17, TPUTO1), 1216 PINMUX_IPSR_DATA(IP9_18_17, TPUTO1),
1218 PINMUX_IPSR_DATA(IP9_18_17, DU1_DG1), 1217 PINMUX_IPSR_DATA(IP9_18_17, DU1_DG1),
1219 PINMUX_IPSR_DATA(IP9_21_19, HSCIF1_HSCK), 1218 PINMUX_IPSR_DATA(IP9_21_19, HSCIF1_HSCK),
1220 PINMUX_IPSR_DATA(IP9_21_19, PWM2), 1219 PINMUX_IPSR_DATA(IP9_21_19, PWM2),
1221 PINMUX_IPSR_MODSEL_DATA(IP9_21_19, IETX, SEL_IEB_0), 1220 PINMUX_IPSR_MSEL(IP9_21_19, IETX, SEL_IEB_0),
1222 PINMUX_IPSR_DATA(IP9_21_19, DU1_DG2), 1221 PINMUX_IPSR_DATA(IP9_21_19, DU1_DG2),
1223 PINMUX_IPSR_MODSEL_DATA(IP9_21_19, REMOCON_B, SEL_RCN_1), 1222 PINMUX_IPSR_MSEL(IP9_21_19, REMOCON_B, SEL_RCN_1),
1224 PINMUX_IPSR_MODSEL_DATA(IP9_21_19, SPEEDIN_B, SEL_RSP_1), 1223 PINMUX_IPSR_MSEL(IP9_21_19, SPEEDIN_B, SEL_RSP_1),
1225 PINMUX_IPSR_MODSEL_DATA(IP9_21_19, VSP_B, SEL_SPDM_1), 1224 PINMUX_IPSR_MSEL(IP9_21_19, VSP_B, SEL_SPDM_1),
1226 PINMUX_IPSR_MODSEL_DATA(IP9_24_22, HSCIF1_HCTS_N, SEL_HSCIF1_0), 1225 PINMUX_IPSR_MSEL(IP9_24_22, HSCIF1_HCTS_N, SEL_HSCIF1_0),
1227 PINMUX_IPSR_MODSEL_DATA(IP9_24_22, SCIFA4_RXD, SEL_SCIFA4_0), 1226 PINMUX_IPSR_MSEL(IP9_24_22, SCIFA4_RXD, SEL_SCIFA4_0),
1228 PINMUX_IPSR_MODSEL_DATA(IP9_24_22, IECLK, SEL_IEB_0), 1227 PINMUX_IPSR_MSEL(IP9_24_22, IECLK, SEL_IEB_0),
1229 PINMUX_IPSR_DATA(IP9_24_22, DU1_DG3), 1228 PINMUX_IPSR_DATA(IP9_24_22, DU1_DG3),
1230 PINMUX_IPSR_MODSEL_DATA(IP9_24_22, SSI_SCK1_B, SEL_SSI1_1), 1229 PINMUX_IPSR_MSEL(IP9_24_22, SSI_SCK1_B, SEL_SSI1_1),
1231 PINMUX_IPSR_DATA(IP9_24_22, CAN_DEBUG_HW_TRIGGER), 1230 PINMUX_IPSR_DATA(IP9_24_22, CAN_DEBUG_HW_TRIGGER),
1232 PINMUX_IPSR_DATA(IP9_24_22, CC50_STATE32), 1231 PINMUX_IPSR_DATA(IP9_24_22, CC50_STATE32),
1233 PINMUX_IPSR_MODSEL_DATA(IP9_27_25, HSCIF1_HRTS_N, SEL_HSCIF1_0), 1232 PINMUX_IPSR_MSEL(IP9_27_25, HSCIF1_HRTS_N, SEL_HSCIF1_0),
1234 PINMUX_IPSR_MODSEL_DATA(IP9_27_25, SCIFA4_TXD, SEL_SCIFA4_0), 1233 PINMUX_IPSR_MSEL(IP9_27_25, SCIFA4_TXD, SEL_SCIFA4_0),
1235 PINMUX_IPSR_MODSEL_DATA(IP9_27_25, IERX, SEL_IEB_0), 1234 PINMUX_IPSR_MSEL(IP9_27_25, IERX, SEL_IEB_0),
1236 PINMUX_IPSR_DATA(IP9_27_25, DU1_DG4), 1235 PINMUX_IPSR_DATA(IP9_27_25, DU1_DG4),
1237 PINMUX_IPSR_MODSEL_DATA(IP9_27_25, SSI_WS1_B, SEL_SSI1_1), 1236 PINMUX_IPSR_MSEL(IP9_27_25, SSI_WS1_B, SEL_SSI1_1),
1238 PINMUX_IPSR_DATA(IP9_27_25, CAN_STEP0), 1237 PINMUX_IPSR_DATA(IP9_27_25, CAN_STEP0),
1239 PINMUX_IPSR_DATA(IP9_27_25, CC50_STATE33), 1238 PINMUX_IPSR_DATA(IP9_27_25, CC50_STATE33),
1240 PINMUX_IPSR_MODSEL_DATA(IP9_30_28, SCIF1_SCK, SEL_SCIF1_0), 1239 PINMUX_IPSR_MSEL(IP9_30_28, SCIF1_SCK, SEL_SCIF1_0),
1241 PINMUX_IPSR_DATA(IP9_30_28, PWM3), 1240 PINMUX_IPSR_DATA(IP9_30_28, PWM3),
1242 PINMUX_IPSR_MODSEL_DATA(IP9_30_28, TCLK2, SEL_TMU_0), 1241 PINMUX_IPSR_MSEL(IP9_30_28, TCLK2, SEL_TMU_0),
1243 PINMUX_IPSR_DATA(IP9_30_28, DU1_DG5), 1242 PINMUX_IPSR_DATA(IP9_30_28, DU1_DG5),
1244 PINMUX_IPSR_MODSEL_DATA(IP9_30_28, SSI_SDATA1_B, SEL_SSI1_1), 1243 PINMUX_IPSR_MSEL(IP9_30_28, SSI_SDATA1_B, SEL_SSI1_1),
1245 PINMUX_IPSR_DATA(IP9_30_28, CAN_TXCLK), 1244 PINMUX_IPSR_DATA(IP9_30_28, CAN_TXCLK),
1246 PINMUX_IPSR_DATA(IP9_30_28, CC50_STATE34), 1245 PINMUX_IPSR_DATA(IP9_30_28, CC50_STATE34),
1247 1246
1248 /* IPSR10 */ 1247 /* IPSR10 */
1249 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SCIF1_RXD, SEL_SCIF1_0), 1248 PINMUX_IPSR_MSEL(IP10_2_0, SCIF1_RXD, SEL_SCIF1_0),
1250 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, IIC0_SCL, SEL_IIC00_0), 1249 PINMUX_IPSR_MSEL(IP10_2_0, IIC0_SCL, SEL_IIC00_0),
1251 PINMUX_IPSR_DATA(IP10_2_0, DU1_DG6), 1250 PINMUX_IPSR_DATA(IP10_2_0, DU1_DG6),
1252 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, SSI_SCK2_B, SEL_SSI2_1), 1251 PINMUX_IPSR_MSEL(IP10_2_0, SSI_SCK2_B, SEL_SSI2_1),
1253 PINMUX_IPSR_DATA(IP10_2_0, CAN_DEBUGOUT0), 1252 PINMUX_IPSR_DATA(IP10_2_0, CAN_DEBUGOUT0),
1254 PINMUX_IPSR_DATA(IP10_2_0, CC50_STATE35), 1253 PINMUX_IPSR_DATA(IP10_2_0, CC50_STATE35),
1255 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SCIF1_TXD, SEL_SCIF1_0), 1254 PINMUX_IPSR_MSEL(IP10_5_3, SCIF1_TXD, SEL_SCIF1_0),
1256 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, IIC0_SDA, SEL_IIC00_0), 1255 PINMUX_IPSR_MSEL(IP10_5_3, IIC0_SDA, SEL_IIC00_0),
1257 PINMUX_IPSR_DATA(IP10_5_3, DU1_DG7), 1256 PINMUX_IPSR_DATA(IP10_5_3, DU1_DG7),
1258 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, SSI_WS2_B, SEL_SSI2_1), 1257 PINMUX_IPSR_MSEL(IP10_5_3, SSI_WS2_B, SEL_SSI2_1),
1259 PINMUX_IPSR_DATA(IP10_5_3, CAN_DEBUGOUT1), 1258 PINMUX_IPSR_DATA(IP10_5_3, CAN_DEBUGOUT1),
1260 PINMUX_IPSR_DATA(IP10_5_3, CC50_STATE36), 1259 PINMUX_IPSR_DATA(IP10_5_3, CC50_STATE36),
1261 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SCIF2_RXD, SEL_SCIF2_0), 1260 PINMUX_IPSR_MSEL(IP10_8_6, SCIF2_RXD, SEL_SCIF2_0),
1262 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, IIC1_SCL, SEL_IIC01_0), 1261 PINMUX_IPSR_MSEL(IP10_8_6, IIC1_SCL, SEL_IIC01_0),
1263 PINMUX_IPSR_DATA(IP10_8_6, DU1_DB0), 1262 PINMUX_IPSR_DATA(IP10_8_6, DU1_DB0),
1264 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, SSI_SDATA2_B, SEL_SSI2_1), 1263 PINMUX_IPSR_MSEL(IP10_8_6, SSI_SDATA2_B, SEL_SSI2_1),
1265 PINMUX_IPSR_DATA(IP10_8_6, USB0_EXTLP), 1264 PINMUX_IPSR_DATA(IP10_8_6, USB0_EXTLP),
1266 PINMUX_IPSR_DATA(IP10_8_6, CAN_DEBUGOUT2), 1265 PINMUX_IPSR_DATA(IP10_8_6, CAN_DEBUGOUT2),
1267 PINMUX_IPSR_DATA(IP10_8_6, CC50_STATE37), 1266 PINMUX_IPSR_DATA(IP10_8_6, CC50_STATE37),
1268 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SCIF2_TXD, SEL_SCIF2_0), 1267 PINMUX_IPSR_MSEL(IP10_11_9, SCIF2_TXD, SEL_SCIF2_0),
1269 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, IIC1_SDA, SEL_IIC01_0), 1268 PINMUX_IPSR_MSEL(IP10_11_9, IIC1_SDA, SEL_IIC01_0),
1270 PINMUX_IPSR_DATA(IP10_11_9, DU1_DB1), 1269 PINMUX_IPSR_DATA(IP10_11_9, DU1_DB1),
1271 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, SSI_SCK9_B, SEL_SSI9_1), 1270 PINMUX_IPSR_MSEL(IP10_11_9, SSI_SCK9_B, SEL_SSI9_1),
1272 PINMUX_IPSR_DATA(IP10_11_9, USB0_OVC1), 1271 PINMUX_IPSR_DATA(IP10_11_9, USB0_OVC1),
1273 PINMUX_IPSR_DATA(IP10_11_9, CAN_DEBUGOUT3), 1272 PINMUX_IPSR_DATA(IP10_11_9, CAN_DEBUGOUT3),
1274 PINMUX_IPSR_DATA(IP10_11_9, CC50_STATE38), 1273 PINMUX_IPSR_DATA(IP10_11_9, CC50_STATE38),
1275 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCIF2_SCK, SEL_SCIF2_0), 1274 PINMUX_IPSR_MSEL(IP10_14_12, SCIF2_SCK, SEL_SCIF2_0),
1276 PINMUX_IPSR_DATA(IP10_14_12, IRQ1), 1275 PINMUX_IPSR_DATA(IP10_14_12, IRQ1),
1277 PINMUX_IPSR_DATA(IP10_14_12, DU1_DB2), 1276 PINMUX_IPSR_DATA(IP10_14_12, DU1_DB2),
1278 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SSI_WS9_B, SEL_SSI9_1), 1277 PINMUX_IPSR_MSEL(IP10_14_12, SSI_WS9_B, SEL_SSI9_1),
1279 PINMUX_IPSR_DATA(IP10_14_12, USB0_IDIN), 1278 PINMUX_IPSR_DATA(IP10_14_12, USB0_IDIN),
1280 PINMUX_IPSR_DATA(IP10_14_12, CAN_DEBUGOUT4), 1279 PINMUX_IPSR_DATA(IP10_14_12, CAN_DEBUGOUT4),
1281 PINMUX_IPSR_DATA(IP10_14_12, CC50_STATE39), 1280 PINMUX_IPSR_DATA(IP10_14_12, CC50_STATE39),
1282 PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SCIF3_SCK, SEL_SCIF3_0), 1281 PINMUX_IPSR_MSEL(IP10_17_15, SCIF3_SCK, SEL_SCIF3_0),
1283 PINMUX_IPSR_DATA(IP10_17_15, IRQ2), 1282 PINMUX_IPSR_DATA(IP10_17_15, IRQ2),
1284 PINMUX_IPSR_MODSEL_DATA(IP10_17_15, BPFCLK_D, SEL_DARC_3), 1283 PINMUX_IPSR_MSEL(IP10_17_15, BPFCLK_D, SEL_DARC_3),
1285 PINMUX_IPSR_DATA(IP10_17_15, DU1_DB3), 1284 PINMUX_IPSR_DATA(IP10_17_15, DU1_DB3),
1286 PINMUX_IPSR_MODSEL_DATA(IP10_17_15, SSI_SDATA9_B, SEL_SSI9_1), 1285 PINMUX_IPSR_MSEL(IP10_17_15, SSI_SDATA9_B, SEL_SSI9_1),
1287 PINMUX_IPSR_DATA(IP10_17_15, TANS2), 1286 PINMUX_IPSR_DATA(IP10_17_15, TANS2),
1288 PINMUX_IPSR_DATA(IP10_17_15, CAN_DEBUGOUT5), 1287 PINMUX_IPSR_DATA(IP10_17_15, CAN_DEBUGOUT5),
1289 PINMUX_IPSR_DATA(IP10_17_15, CC50_OSCOUT), 1288 PINMUX_IPSR_DATA(IP10_17_15, CC50_OSCOUT),
1290 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, SCIF3_RXD, SEL_SCIF3_0), 1289 PINMUX_IPSR_MSEL(IP10_20_18, SCIF3_RXD, SEL_SCIF3_0),
1291 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, I2C1_SCL_E, SEL_I2C01_4), 1290 PINMUX_IPSR_MSEL(IP10_20_18, I2C1_SCL_E, SEL_I2C01_4),
1292 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, FMCLK_D, SEL_DARC_3), 1291 PINMUX_IPSR_MSEL(IP10_20_18, FMCLK_D, SEL_DARC_3),
1293 PINMUX_IPSR_DATA(IP10_20_18, DU1_DB4), 1292 PINMUX_IPSR_DATA(IP10_20_18, DU1_DB4),
1294 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, AUDIO_CLKA_C, SEL_ADG_2), 1293 PINMUX_IPSR_MSEL(IP10_20_18, AUDIO_CLKA_C, SEL_ADG_2),
1295 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, SSI_SCK4_B, SEL_SSI4_1), 1294 PINMUX_IPSR_MSEL(IP10_20_18, SSI_SCK4_B, SEL_SSI4_1),
1296 PINMUX_IPSR_DATA(IP10_20_18, CAN_DEBUGOUT6), 1295 PINMUX_IPSR_DATA(IP10_20_18, CAN_DEBUGOUT6),
1297 PINMUX_IPSR_MODSEL_DATA(IP10_20_18, RDS_CLK_C, SEL_RDS_2), 1296 PINMUX_IPSR_MSEL(IP10_20_18, RDS_CLK_C, SEL_RDS_2),
1298 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SCIF3_TXD, SEL_SCIF3_0), 1297 PINMUX_IPSR_MSEL(IP10_23_21, SCIF3_TXD, SEL_SCIF3_0),
1299 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, I2C1_SDA_E, SEL_I2C01_4), 1298 PINMUX_IPSR_MSEL(IP10_23_21, I2C1_SDA_E, SEL_I2C01_4),
1300 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, FMIN_D, SEL_DARC_3), 1299 PINMUX_IPSR_MSEL(IP10_23_21, FMIN_D, SEL_DARC_3),
1301 PINMUX_IPSR_DATA(IP10_23_21, DU1_DB5), 1300 PINMUX_IPSR_DATA(IP10_23_21, DU1_DB5),
1302 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, AUDIO_CLKB_C, SEL_ADG_2), 1301 PINMUX_IPSR_MSEL(IP10_23_21, AUDIO_CLKB_C, SEL_ADG_2),
1303 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, SSI_WS4_B, SEL_SSI4_1), 1302 PINMUX_IPSR_MSEL(IP10_23_21, SSI_WS4_B, SEL_SSI4_1),
1304 PINMUX_IPSR_DATA(IP10_23_21, CAN_DEBUGOUT7), 1303 PINMUX_IPSR_DATA(IP10_23_21, CAN_DEBUGOUT7),
1305 PINMUX_IPSR_MODSEL_DATA(IP10_23_21, RDS_DATA_C, SEL_RDS_2), 1304 PINMUX_IPSR_MSEL(IP10_23_21, RDS_DATA_C, SEL_RDS_2),
1306 PINMUX_IPSR_MODSEL_DATA(IP10_26_24, I2C2_SCL, SEL_I2C02_0), 1305 PINMUX_IPSR_MSEL(IP10_26_24, I2C2_SCL, SEL_I2C02_0),
1307 PINMUX_IPSR_MODSEL_DATA(IP10_26_24, SCIFA5_RXD, SEL_SCIFA5_0), 1306 PINMUX_IPSR_MSEL(IP10_26_24, SCIFA5_RXD, SEL_SCIFA5_0),
1308 PINMUX_IPSR_DATA(IP10_26_24, DU1_DB6), 1307 PINMUX_IPSR_DATA(IP10_26_24, DU1_DB6),
1309 PINMUX_IPSR_MODSEL_DATA(IP10_26_24, AUDIO_CLKC_C, SEL_ADG_2), 1308 PINMUX_IPSR_MSEL(IP10_26_24, AUDIO_CLKC_C, SEL_ADG_2),
1310 PINMUX_IPSR_MODSEL_DATA(IP10_26_24, SSI_SDATA4_B, SEL_SSI4_1), 1309 PINMUX_IPSR_MSEL(IP10_26_24, SSI_SDATA4_B, SEL_SSI4_1),
1311 PINMUX_IPSR_DATA(IP10_26_24, CAN_DEBUGOUT8), 1310 PINMUX_IPSR_DATA(IP10_26_24, CAN_DEBUGOUT8),
1312 PINMUX_IPSR_MODSEL_DATA(IP10_29_27, I2C2_SDA, SEL_I2C02_0), 1311 PINMUX_IPSR_MSEL(IP10_29_27, I2C2_SDA, SEL_I2C02_0),
1313 PINMUX_IPSR_MODSEL_DATA(IP10_29_27, SCIFA5_TXD, SEL_SCIFA5_0), 1312 PINMUX_IPSR_MSEL(IP10_29_27, SCIFA5_TXD, SEL_SCIFA5_0),
1314 PINMUX_IPSR_DATA(IP10_29_27, DU1_DB7), 1313 PINMUX_IPSR_DATA(IP10_29_27, DU1_DB7),
1315 PINMUX_IPSR_MODSEL_DATA(IP10_29_27, AUDIO_CLKOUT_C, SEL_ADG_2), 1314 PINMUX_IPSR_MSEL(IP10_29_27, AUDIO_CLKOUT_C, SEL_ADG_2),
1316 PINMUX_IPSR_DATA(IP10_29_27, CAN_DEBUGOUT9), 1315 PINMUX_IPSR_DATA(IP10_29_27, CAN_DEBUGOUT9),
1317 PINMUX_IPSR_MODSEL_DATA(IP10_31_30, SSI_SCK5, SEL_SSI5_0), 1316 PINMUX_IPSR_MSEL(IP10_31_30, SSI_SCK5, SEL_SSI5_0),
1318 PINMUX_IPSR_MODSEL_DATA(IP10_31_30, SCIFA3_SCK, SEL_SCIFA3_0), 1317 PINMUX_IPSR_MSEL(IP10_31_30, SCIFA3_SCK, SEL_SCIFA3_0),
1319 PINMUX_IPSR_DATA(IP10_31_30, DU1_DOTCLKIN), 1318 PINMUX_IPSR_DATA(IP10_31_30, DU1_DOTCLKIN),
1320 PINMUX_IPSR_DATA(IP10_31_30, CAN_DEBUGOUT10), 1319 PINMUX_IPSR_DATA(IP10_31_30, CAN_DEBUGOUT10),
1321 1320
1322 /* IPSR11 */ 1321 /* IPSR11 */
1323 PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SSI_WS5, SEL_SSI5_0), 1322 PINMUX_IPSR_MSEL(IP11_2_0, SSI_WS5, SEL_SSI5_0),
1324 PINMUX_IPSR_MODSEL_DATA(IP11_2_0, SCIFA3_RXD, SEL_SCIFA3_0), 1323 PINMUX_IPSR_MSEL(IP11_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
1325 PINMUX_IPSR_MODSEL_DATA(IP11_2_0, I2C3_SCL_C, SEL_I2C03_2), 1324 PINMUX_IPSR_MSEL(IP11_2_0, I2C3_SCL_C, SEL_I2C03_2),
1326 PINMUX_IPSR_DATA(IP11_2_0, DU1_DOTCLKOUT0), 1325 PINMUX_IPSR_DATA(IP11_2_0, DU1_DOTCLKOUT0),
1327 PINMUX_IPSR_DATA(IP11_2_0, CAN_DEBUGOUT11), 1326 PINMUX_IPSR_DATA(IP11_2_0, CAN_DEBUGOUT11),
1328 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SSI_SDATA5, SEL_SSI5_0), 1327 PINMUX_IPSR_MSEL(IP11_5_3, SSI_SDATA5, SEL_SSI5_0),
1329 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, SCIFA3_TXD, SEL_SCIFA3_0), 1328 PINMUX_IPSR_MSEL(IP11_5_3, SCIFA3_TXD, SEL_SCIFA3_0),
1330 PINMUX_IPSR_MODSEL_DATA(IP11_5_3, I2C3_SDA_C, SEL_I2C03_2), 1329 PINMUX_IPSR_MSEL(IP11_5_3, I2C3_SDA_C, SEL_I2C03_2),
1331 PINMUX_IPSR_DATA(IP11_5_3, DU1_DOTCLKOUT1), 1330 PINMUX_IPSR_DATA(IP11_5_3, DU1_DOTCLKOUT1),
1332 PINMUX_IPSR_DATA(IP11_5_3, CAN_DEBUGOUT12), 1331 PINMUX_IPSR_DATA(IP11_5_3, CAN_DEBUGOUT12),
1333 PINMUX_IPSR_MODSEL_DATA(IP11_7_6, SSI_SCK6, SEL_SSI6_0), 1332 PINMUX_IPSR_MSEL(IP11_7_6, SSI_SCK6, SEL_SSI6_0),
1334 PINMUX_IPSR_MODSEL_DATA(IP11_7_6, SCIFA1_SCK_B, SEL_SCIFA1_1), 1333 PINMUX_IPSR_MSEL(IP11_7_6, SCIFA1_SCK_B, SEL_SCIFA1_1),
1335 PINMUX_IPSR_DATA(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC), 1334 PINMUX_IPSR_DATA(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC),
1336 PINMUX_IPSR_DATA(IP11_7_6, CAN_DEBUGOUT13), 1335 PINMUX_IPSR_DATA(IP11_7_6, CAN_DEBUGOUT13),
1337 PINMUX_IPSR_MODSEL_DATA(IP11_10_8, SSI_WS6, SEL_SSI6_0), 1336 PINMUX_IPSR_MSEL(IP11_10_8, SSI_WS6, SEL_SSI6_0),
1338 PINMUX_IPSR_MODSEL_DATA(IP11_10_8, SCIFA1_RXD_B, SEL_SCIFA1_1), 1337 PINMUX_IPSR_MSEL(IP11_10_8, SCIFA1_RXD_B, SEL_SCIFA1_1),
1339 PINMUX_IPSR_MODSEL_DATA(IP11_10_8, I2C4_SCL_C, SEL_I2C04_2), 1338 PINMUX_IPSR_MSEL(IP11_10_8, I2C4_SCL_C, SEL_I2C04_2),
1340 PINMUX_IPSR_DATA(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC), 1339 PINMUX_IPSR_DATA(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC),
1341 PINMUX_IPSR_DATA(IP11_10_8, CAN_DEBUGOUT14), 1340 PINMUX_IPSR_DATA(IP11_10_8, CAN_DEBUGOUT14),
1342 PINMUX_IPSR_MODSEL_DATA(IP11_13_11, SSI_SDATA6, SEL_SSI6_0), 1341 PINMUX_IPSR_MSEL(IP11_13_11, SSI_SDATA6, SEL_SSI6_0),
1343 PINMUX_IPSR_MODSEL_DATA(IP11_13_11, SCIFA1_TXD_B, SEL_SCIFA1_1), 1342 PINMUX_IPSR_MSEL(IP11_13_11, SCIFA1_TXD_B, SEL_SCIFA1_1),
1344 PINMUX_IPSR_MODSEL_DATA(IP11_13_11, I2C4_SDA_C, SEL_I2C04_2), 1343 PINMUX_IPSR_MSEL(IP11_13_11, I2C4_SDA_C, SEL_I2C04_2),
1345 PINMUX_IPSR_DATA(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE), 1344 PINMUX_IPSR_DATA(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE),
1346 PINMUX_IPSR_DATA(IP11_13_11, CAN_DEBUGOUT15), 1345 PINMUX_IPSR_DATA(IP11_13_11, CAN_DEBUGOUT15),
1347 PINMUX_IPSR_MODSEL_DATA(IP11_15_14, SSI_SCK78, SEL_SSI7_0), 1346 PINMUX_IPSR_MSEL(IP11_15_14, SSI_SCK78, SEL_SSI7_0),
1348 PINMUX_IPSR_MODSEL_DATA(IP11_15_14, SCIFA2_SCK_B, SEL_SCIFA2_1), 1347 PINMUX_IPSR_MSEL(IP11_15_14, SCIFA2_SCK_B, SEL_SCIFA2_1),
1349 PINMUX_IPSR_MODSEL_DATA(IP11_15_14, IIC0_SDA_C, SEL_IIC00_2), 1348 PINMUX_IPSR_MSEL(IP11_15_14, IIC0_SDA_C, SEL_IIC00_2),
1350 PINMUX_IPSR_DATA(IP11_15_14, DU1_DISP), 1349 PINMUX_IPSR_DATA(IP11_15_14, DU1_DISP),
1351 PINMUX_IPSR_MODSEL_DATA(IP11_17_16, SSI_WS78, SEL_SSI7_0), 1350 PINMUX_IPSR_MSEL(IP11_17_16, SSI_WS78, SEL_SSI7_0),
1352 PINMUX_IPSR_MODSEL_DATA(IP11_17_16, SCIFA2_RXD_B, SEL_SCIFA2_1), 1351 PINMUX_IPSR_MSEL(IP11_17_16, SCIFA2_RXD_B, SEL_SCIFA2_1),
1353 PINMUX_IPSR_MODSEL_DATA(IP11_17_16, IIC0_SCL_C, SEL_IIC00_2), 1352 PINMUX_IPSR_MSEL(IP11_17_16, IIC0_SCL_C, SEL_IIC00_2),
1354 PINMUX_IPSR_DATA(IP11_17_16, DU1_CDE), 1353 PINMUX_IPSR_DATA(IP11_17_16, DU1_CDE),
1355 PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SSI_SDATA7, SEL_SSI7_0), 1354 PINMUX_IPSR_MSEL(IP11_20_18, SSI_SDATA7, SEL_SSI7_0),
1356 PINMUX_IPSR_MODSEL_DATA(IP11_20_18, SCIFA2_TXD_B, SEL_SCIFA2_1), 1355 PINMUX_IPSR_MSEL(IP11_20_18, SCIFA2_TXD_B, SEL_SCIFA2_1),
1357 PINMUX_IPSR_DATA(IP11_20_18, IRQ8), 1356 PINMUX_IPSR_DATA(IP11_20_18, IRQ8),
1358 PINMUX_IPSR_MODSEL_DATA(IP11_20_18, AUDIO_CLKA_D, SEL_ADG_3), 1357 PINMUX_IPSR_MSEL(IP11_20_18, AUDIO_CLKA_D, SEL_ADG_3),
1359 PINMUX_IPSR_MODSEL_DATA(IP11_20_18, CAN_CLK_D, SEL_CAN_3), 1358 PINMUX_IPSR_MSEL(IP11_20_18, CAN_CLK_D, SEL_CAN_3),
1360 PINMUX_IPSR_DATA(IP11_20_18, PCMOE_N), 1359 PINMUX_IPSR_DATA(IP11_20_18, PCMOE_N),
1361 PINMUX_IPSR_DATA(IP11_23_21, SSI_SCK0129), 1360 PINMUX_IPSR_DATA(IP11_23_21, SSI_SCK0129),
1362 PINMUX_IPSR_MODSEL_DATA(IP11_23_21, MSIOF1_RXD_B, SEL_MSI1_1), 1361 PINMUX_IPSR_MSEL(IP11_23_21, MSIOF1_RXD_B, SEL_MSI1_1),
1363 PINMUX_IPSR_MODSEL_DATA(IP11_23_21, SCIF5_RXD_D, SEL_SCIF5_3), 1362 PINMUX_IPSR_MSEL(IP11_23_21, SCIF5_RXD_D, SEL_SCIF5_3),
1364 PINMUX_IPSR_MODSEL_DATA(IP11_23_21, ADIDATA_B, SEL_RAD_1), 1363 PINMUX_IPSR_MSEL(IP11_23_21, ADIDATA_B, SEL_RAD_1),
1365 PINMUX_IPSR_MODSEL_DATA(IP11_23_21, AD_DI_B, SEL_ADI_1), 1364 PINMUX_IPSR_MSEL(IP11_23_21, AD_DI_B, SEL_ADI_1),
1366 PINMUX_IPSR_DATA(IP11_23_21, PCMWE_N), 1365 PINMUX_IPSR_DATA(IP11_23_21, PCMWE_N),
1367 PINMUX_IPSR_DATA(IP11_26_24, SSI_WS0129), 1366 PINMUX_IPSR_DATA(IP11_26_24, SSI_WS0129),
1368 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, MSIOF1_TXD_B, SEL_MSI1_1), 1367 PINMUX_IPSR_MSEL(IP11_26_24, MSIOF1_TXD_B, SEL_MSI1_1),
1369 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, SCIF5_TXD_D, SEL_SCIF5_3), 1368 PINMUX_IPSR_MSEL(IP11_26_24, SCIF5_TXD_D, SEL_SCIF5_3),
1370 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, ADICS_SAMP_B, SEL_RAD_1), 1369 PINMUX_IPSR_MSEL(IP11_26_24, ADICS_SAMP_B, SEL_RAD_1),
1371 PINMUX_IPSR_MODSEL_DATA(IP11_26_24, AD_DO_B, SEL_ADI_1), 1370 PINMUX_IPSR_MSEL(IP11_26_24, AD_DO_B, SEL_ADI_1),
1372 PINMUX_IPSR_DATA(IP11_29_27, SSI_SDATA0), 1371 PINMUX_IPSR_DATA(IP11_29_27, SSI_SDATA0),
1373 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, MSIOF1_SCK_B, SEL_MSI1_1), 1372 PINMUX_IPSR_MSEL(IP11_29_27, MSIOF1_SCK_B, SEL_MSI1_1),
1374 PINMUX_IPSR_DATA(IP11_29_27, PWM0_B), 1373 PINMUX_IPSR_DATA(IP11_29_27, PWM0_B),
1375 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, ADICLK_B, SEL_RAD_1), 1374 PINMUX_IPSR_MSEL(IP11_29_27, ADICLK_B, SEL_RAD_1),
1376 PINMUX_IPSR_MODSEL_DATA(IP11_29_27, AD_CLK_B, SEL_ADI_1), 1375 PINMUX_IPSR_MSEL(IP11_29_27, AD_CLK_B, SEL_ADI_1),
1377 1376
1378 /* IPSR12 */ 1377 /* IPSR12 */
1379 PINMUX_IPSR_DATA(IP12_2_0, SSI_SCK34), 1378 PINMUX_IPSR_DATA(IP12_2_0, SSI_SCK34),
1380 PINMUX_IPSR_MODSEL_DATA(IP12_2_0, MSIOF1_SYNC_B, SEL_MSI1_1), 1379 PINMUX_IPSR_MSEL(IP12_2_0, MSIOF1_SYNC_B, SEL_MSI1_1),
1381 PINMUX_IPSR_MODSEL_DATA(IP12_2_0, SCIFA1_SCK_C, SEL_SCIFA1_2), 1380 PINMUX_IPSR_MSEL(IP12_2_0, SCIFA1_SCK_C, SEL_SCIFA1_2),
1382 PINMUX_IPSR_MODSEL_DATA(IP12_2_0, ADICHS0_B, SEL_RAD_1), 1381 PINMUX_IPSR_MSEL(IP12_2_0, ADICHS0_B, SEL_RAD_1),
1383 PINMUX_IPSR_MODSEL_DATA(IP12_2_0, AD_NCS_N_B, SEL_ADI_1), 1382 PINMUX_IPSR_MSEL(IP12_2_0, AD_NCS_N_B, SEL_ADI_1),
1384 PINMUX_IPSR_MODSEL_DATA(IP12_2_0, DREQ1_N_B, SEL_LBS_1), 1383 PINMUX_IPSR_MSEL(IP12_2_0, DREQ1_N_B, SEL_LBS_1),
1385 PINMUX_IPSR_DATA(IP12_5_3, SSI_WS34), 1384 PINMUX_IPSR_DATA(IP12_5_3, SSI_WS34),
1386 PINMUX_IPSR_MODSEL_DATA(IP12_5_3, MSIOF1_SS1_B, SEL_MSI1_1), 1385 PINMUX_IPSR_MSEL(IP12_5_3, MSIOF1_SS1_B, SEL_MSI1_1),
1387 PINMUX_IPSR_MODSEL_DATA(IP12_5_3, SCIFA1_RXD_C, SEL_SCIFA1_2), 1386 PINMUX_IPSR_MSEL(IP12_5_3, SCIFA1_RXD_C, SEL_SCIFA1_2),
1388 PINMUX_IPSR_MODSEL_DATA(IP12_5_3, ADICHS1_B, SEL_RAD_1), 1387 PINMUX_IPSR_MSEL(IP12_5_3, ADICHS1_B, SEL_RAD_1),
1389 PINMUX_IPSR_MODSEL_DATA(IP12_5_3, CAN1_RX_C, SEL_CAN1_2), 1388 PINMUX_IPSR_MSEL(IP12_5_3, CAN1_RX_C, SEL_CAN1_2),
1390 PINMUX_IPSR_MODSEL_DATA(IP12_5_3, DACK1_B, SEL_LBS_1), 1389 PINMUX_IPSR_MSEL(IP12_5_3, DACK1_B, SEL_LBS_1),
1391 PINMUX_IPSR_DATA(IP12_8_6, SSI_SDATA3), 1390 PINMUX_IPSR_DATA(IP12_8_6, SSI_SDATA3),
1392 PINMUX_IPSR_MODSEL_DATA(IP12_8_6, MSIOF1_SS2_B, SEL_MSI1_1), 1391 PINMUX_IPSR_MSEL(IP12_8_6, MSIOF1_SS2_B, SEL_MSI1_1),
1393 PINMUX_IPSR_MODSEL_DATA(IP12_8_6, SCIFA1_TXD_C, SEL_SCIFA1_2), 1392 PINMUX_IPSR_MSEL(IP12_8_6, SCIFA1_TXD_C, SEL_SCIFA1_2),
1394 PINMUX_IPSR_MODSEL_DATA(IP12_8_6, ADICHS2_B, SEL_RAD_1), 1393 PINMUX_IPSR_MSEL(IP12_8_6, ADICHS2_B, SEL_RAD_1),
1395 PINMUX_IPSR_MODSEL_DATA(IP12_8_6, CAN1_TX_C, SEL_CAN1_2), 1394 PINMUX_IPSR_MSEL(IP12_8_6, CAN1_TX_C, SEL_CAN1_2),
1396 PINMUX_IPSR_DATA(IP12_8_6, DREQ2_N), 1395 PINMUX_IPSR_DATA(IP12_8_6, DREQ2_N),
1397 PINMUX_IPSR_MODSEL_DATA(IP12_10_9, SSI_SCK4, SEL_SSI4_0), 1396 PINMUX_IPSR_MSEL(IP12_10_9, SSI_SCK4, SEL_SSI4_0),
1398 PINMUX_IPSR_DATA(IP12_10_9, MLB_CLK), 1397 PINMUX_IPSR_DATA(IP12_10_9, MLB_CLK),
1399 PINMUX_IPSR_MODSEL_DATA(IP12_10_9, IETX_B, SEL_IEB_1), 1398 PINMUX_IPSR_MSEL(IP12_10_9, IETX_B, SEL_IEB_1),
1400 PINMUX_IPSR_DATA(IP12_10_9, IRD_TX), 1399 PINMUX_IPSR_DATA(IP12_10_9, IRD_TX),
1401 PINMUX_IPSR_MODSEL_DATA(IP12_12_11, SSI_WS4, SEL_SSI4_0), 1400 PINMUX_IPSR_MSEL(IP12_12_11, SSI_WS4, SEL_SSI4_0),
1402 PINMUX_IPSR_DATA(IP12_12_11, MLB_SIG), 1401 PINMUX_IPSR_DATA(IP12_12_11, MLB_SIG),
1403 PINMUX_IPSR_MODSEL_DATA(IP12_12_11, IECLK_B, SEL_IEB_1), 1402 PINMUX_IPSR_MSEL(IP12_12_11, IECLK_B, SEL_IEB_1),
1404 PINMUX_IPSR_DATA(IP12_12_11, IRD_RX), 1403 PINMUX_IPSR_DATA(IP12_12_11, IRD_RX),
1405 PINMUX_IPSR_MODSEL_DATA(IP12_14_13, SSI_SDATA4, SEL_SSI4_0), 1404 PINMUX_IPSR_MSEL(IP12_14_13, SSI_SDATA4, SEL_SSI4_0),
1406 PINMUX_IPSR_DATA(IP12_14_13, MLB_DAT), 1405 PINMUX_IPSR_DATA(IP12_14_13, MLB_DAT),
1407 PINMUX_IPSR_MODSEL_DATA(IP12_14_13, IERX_B, SEL_IEB_1), 1406 PINMUX_IPSR_MSEL(IP12_14_13, IERX_B, SEL_IEB_1),
1408 PINMUX_IPSR_DATA(IP12_14_13, IRD_SCK), 1407 PINMUX_IPSR_DATA(IP12_14_13, IRD_SCK),
1409 PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SSI_SDATA8, SEL_SSI8_0), 1408 PINMUX_IPSR_MSEL(IP12_17_15, SSI_SDATA8, SEL_SSI8_0),
1410 PINMUX_IPSR_MODSEL_DATA(IP12_17_15, SCIF1_SCK_B, SEL_SCIF1_1), 1409 PINMUX_IPSR_MSEL(IP12_17_15, SCIF1_SCK_B, SEL_SCIF1_1),
1411 PINMUX_IPSR_DATA(IP12_17_15, PWM1_B), 1410 PINMUX_IPSR_DATA(IP12_17_15, PWM1_B),
1412 PINMUX_IPSR_DATA(IP12_17_15, IRQ9), 1411 PINMUX_IPSR_DATA(IP12_17_15, IRQ9),
1413 PINMUX_IPSR_MODSEL_DATA(IP12_17_15, REMOCON, SEL_RCN_0), 1412 PINMUX_IPSR_MSEL(IP12_17_15, REMOCON, SEL_RCN_0),
1414 PINMUX_IPSR_DATA(IP12_17_15, DACK2), 1413 PINMUX_IPSR_DATA(IP12_17_15, DACK2),
1415 PINMUX_IPSR_MODSEL_DATA(IP12_17_15, ETH_MDIO_B, SEL_ETH_1), 1414 PINMUX_IPSR_MSEL(IP12_17_15, ETH_MDIO_B, SEL_ETH_1),
1416 PINMUX_IPSR_MODSEL_DATA(IP12_20_18, SSI_SCK1, SEL_SSI1_0), 1415 PINMUX_IPSR_MSEL(IP12_20_18, SSI_SCK1, SEL_SSI1_0),
1417 PINMUX_IPSR_MODSEL_DATA(IP12_20_18, SCIF1_RXD_B, SEL_SCIF1_1), 1416 PINMUX_IPSR_MSEL(IP12_20_18, SCIF1_RXD_B, SEL_SCIF1_1),
1418 PINMUX_IPSR_MODSEL_DATA(IP12_20_18, IIC1_SCL_C, SEL_IIC01_2), 1417 PINMUX_IPSR_MSEL(IP12_20_18, IIC1_SCL_C, SEL_IIC01_2),
1419 PINMUX_IPSR_DATA(IP12_20_18, VI1_CLK), 1418 PINMUX_IPSR_DATA(IP12_20_18, VI1_CLK),
1420 PINMUX_IPSR_MODSEL_DATA(IP12_20_18, CAN0_RX_D, SEL_CAN0_3), 1419 PINMUX_IPSR_MSEL(IP12_20_18, CAN0_RX_D, SEL_CAN0_3),
1421 PINMUX_IPSR_MODSEL_DATA(IP12_20_18, AVB_AVTP_CAPTURE, SEL_AVB_0), 1420 PINMUX_IPSR_MSEL(IP12_20_18, AVB_AVTP_CAPTURE, SEL_AVB_0),
1422 PINMUX_IPSR_MODSEL_DATA(IP12_20_18, ETH_CRS_DV_B, SEL_ETH_1), 1421 PINMUX_IPSR_MSEL(IP12_20_18, ETH_CRS_DV_B, SEL_ETH_1),
1423 PINMUX_IPSR_MODSEL_DATA(IP12_23_21, SSI_WS1, SEL_SSI1_0), 1422 PINMUX_IPSR_MSEL(IP12_23_21, SSI_WS1, SEL_SSI1_0),
1424 PINMUX_IPSR_MODSEL_DATA(IP12_23_21, SCIF1_TXD_B, SEL_SCIF1_1), 1423 PINMUX_IPSR_MSEL(IP12_23_21, SCIF1_TXD_B, SEL_SCIF1_1),
1425 PINMUX_IPSR_MODSEL_DATA(IP12_23_21, IIC1_SDA_C, SEL_IIC01_2), 1424 PINMUX_IPSR_MSEL(IP12_23_21, IIC1_SDA_C, SEL_IIC01_2),
1426 PINMUX_IPSR_DATA(IP12_23_21, VI1_DATA0), 1425 PINMUX_IPSR_DATA(IP12_23_21, VI1_DATA0),
1427 PINMUX_IPSR_MODSEL_DATA(IP12_23_21, CAN0_TX_D, SEL_CAN0_3), 1426 PINMUX_IPSR_MSEL(IP12_23_21, CAN0_TX_D, SEL_CAN0_3),
1428 PINMUX_IPSR_MODSEL_DATA(IP12_23_21, AVB_AVTP_MATCH, SEL_AVB_0), 1427 PINMUX_IPSR_MSEL(IP12_23_21, AVB_AVTP_MATCH, SEL_AVB_0),
1429 PINMUX_IPSR_MODSEL_DATA(IP12_23_21, ETH_RX_ER_B, SEL_ETH_1), 1428 PINMUX_IPSR_MSEL(IP12_23_21, ETH_RX_ER_B, SEL_ETH_1),
1430 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, SSI_SDATA1, SEL_SSI1_0), 1429 PINMUX_IPSR_MSEL(IP12_26_24, SSI_SDATA1, SEL_SSI1_0),
1431 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1), 1430 PINMUX_IPSR_MSEL(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
1432 PINMUX_IPSR_DATA(IP12_26_24, VI1_DATA1), 1431 PINMUX_IPSR_DATA(IP12_26_24, VI1_DATA1),
1433 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, SDATA, SEL_FSN_0), 1432 PINMUX_IPSR_MSEL(IP12_26_24, SDATA, SEL_FSN_0),
1434 PINMUX_IPSR_DATA(IP12_26_24, ATAG0_N), 1433 PINMUX_IPSR_DATA(IP12_26_24, ATAG0_N),
1435 PINMUX_IPSR_MODSEL_DATA(IP12_26_24, ETH_RXD0_B, SEL_ETH_1), 1434 PINMUX_IPSR_MSEL(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
1436 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, SSI_SCK2, SEL_SSI2_0), 1435 PINMUX_IPSR_MSEL(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
1437 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1), 1436 PINMUX_IPSR_MSEL(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
1438 PINMUX_IPSR_DATA(IP12_29_27, VI1_DATA2), 1437 PINMUX_IPSR_DATA(IP12_29_27, VI1_DATA2),
1439 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, MDATA, SEL_FSN_0), 1438 PINMUX_IPSR_MSEL(IP12_29_27, MDATA, SEL_FSN_0),
1440 PINMUX_IPSR_DATA(IP12_29_27, ATAWR0_N), 1439 PINMUX_IPSR_DATA(IP12_29_27, ATAWR0_N),
1441 PINMUX_IPSR_MODSEL_DATA(IP12_29_27, ETH_RXD1_B, SEL_ETH_1), 1440 PINMUX_IPSR_MSEL(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
1442 1441
1443 /* IPSR13 */ 1442 /* IPSR13 */
1444 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SSI_WS2, SEL_SSI2_0), 1443 PINMUX_IPSR_MSEL(IP13_2_0, SSI_WS2, SEL_SSI2_0),
1445 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, HSCIF1_HCTS_N_B, SEL_HSCIF1_1), 1444 PINMUX_IPSR_MSEL(IP13_2_0, HSCIF1_HCTS_N_B, SEL_HSCIF1_1),
1446 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCIFA0_RXD_D, SEL_SCIFA0_3), 1445 PINMUX_IPSR_MSEL(IP13_2_0, SCIFA0_RXD_D, SEL_SCIFA0_3),
1447 PINMUX_IPSR_DATA(IP13_2_0, VI1_DATA3), 1446 PINMUX_IPSR_DATA(IP13_2_0, VI1_DATA3),
1448 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, SCKZ, SEL_FSN_0), 1447 PINMUX_IPSR_MSEL(IP13_2_0, SCKZ, SEL_FSN_0),
1449 PINMUX_IPSR_DATA(IP13_2_0, ATACS00_N), 1448 PINMUX_IPSR_DATA(IP13_2_0, ATACS00_N),
1450 PINMUX_IPSR_MODSEL_DATA(IP13_2_0, ETH_LINK_B, SEL_ETH_1), 1449 PINMUX_IPSR_MSEL(IP13_2_0, ETH_LINK_B, SEL_ETH_1),
1451 PINMUX_IPSR_MODSEL_DATA(IP13_5_3, SSI_SDATA2, SEL_SSI2_0), 1450 PINMUX_IPSR_MSEL(IP13_5_3, SSI_SDATA2, SEL_SSI2_0),
1452 PINMUX_IPSR_MODSEL_DATA(IP13_5_3, HSCIF1_HRTS_N_B, SEL_HSCIF1_1), 1451 PINMUX_IPSR_MSEL(IP13_5_3, HSCIF1_HRTS_N_B, SEL_HSCIF1_1),
1453 PINMUX_IPSR_MODSEL_DATA(IP13_5_3, SCIFA0_TXD_D, SEL_SCIFA0_3), 1452 PINMUX_IPSR_MSEL(IP13_5_3, SCIFA0_TXD_D, SEL_SCIFA0_3),
1454 PINMUX_IPSR_DATA(IP13_5_3, VI1_DATA4), 1453 PINMUX_IPSR_DATA(IP13_5_3, VI1_DATA4),
1455 PINMUX_IPSR_MODSEL_DATA(IP13_5_3, STM_N, SEL_FSN_0), 1454 PINMUX_IPSR_MSEL(IP13_5_3, STM_N, SEL_FSN_0),
1456 PINMUX_IPSR_DATA(IP13_5_3, ATACS10_N), 1455 PINMUX_IPSR_DATA(IP13_5_3, ATACS10_N),
1457 PINMUX_IPSR_MODSEL_DATA(IP13_5_3, ETH_REFCLK_B, SEL_ETH_1), 1456 PINMUX_IPSR_MSEL(IP13_5_3, ETH_REFCLK_B, SEL_ETH_1),
1458 PINMUX_IPSR_MODSEL_DATA(IP13_8_6, SSI_SCK9, SEL_SSI9_0), 1457 PINMUX_IPSR_MSEL(IP13_8_6, SSI_SCK9, SEL_SSI9_0),
1459 PINMUX_IPSR_MODSEL_DATA(IP13_8_6, SCIF2_SCK_B, SEL_SCIF2_1), 1458 PINMUX_IPSR_MSEL(IP13_8_6, SCIF2_SCK_B, SEL_SCIF2_1),
1460 PINMUX_IPSR_DATA(IP13_8_6, PWM2_B), 1459 PINMUX_IPSR_DATA(IP13_8_6, PWM2_B),
1461 PINMUX_IPSR_DATA(IP13_8_6, VI1_DATA5), 1460 PINMUX_IPSR_DATA(IP13_8_6, VI1_DATA5),
1462 PINMUX_IPSR_MODSEL_DATA(IP13_8_6, MTS_N, SEL_FSN_0), 1461 PINMUX_IPSR_MSEL(IP13_8_6, MTS_N, SEL_FSN_0),
1463 PINMUX_IPSR_DATA(IP13_8_6, EX_WAIT1), 1462 PINMUX_IPSR_DATA(IP13_8_6, EX_WAIT1),
1464 PINMUX_IPSR_MODSEL_DATA(IP13_8_6, ETH_TXD1_B, SEL_ETH_1), 1463 PINMUX_IPSR_MSEL(IP13_8_6, ETH_TXD1_B, SEL_ETH_1),
1465 PINMUX_IPSR_MODSEL_DATA(IP13_11_9, SSI_WS9, SEL_SSI9_0), 1464 PINMUX_IPSR_MSEL(IP13_11_9, SSI_WS9, SEL_SSI9_0),
1466 PINMUX_IPSR_MODSEL_DATA(IP13_11_9, SCIF2_RXD_B, SEL_SCIF2_1), 1465 PINMUX_IPSR_MSEL(IP13_11_9, SCIF2_RXD_B, SEL_SCIF2_1),
1467 PINMUX_IPSR_MODSEL_DATA(IP13_11_9, I2C3_SCL_E, SEL_I2C03_4), 1466 PINMUX_IPSR_MSEL(IP13_11_9, I2C3_SCL_E, SEL_I2C03_4),
1468 PINMUX_IPSR_DATA(IP13_11_9, VI1_DATA6), 1467 PINMUX_IPSR_DATA(IP13_11_9, VI1_DATA6),
1469 PINMUX_IPSR_DATA(IP13_11_9, ATARD0_N), 1468 PINMUX_IPSR_DATA(IP13_11_9, ATARD0_N),
1470 PINMUX_IPSR_MODSEL_DATA(IP13_11_9, ETH_TX_EN_B, SEL_ETH_1), 1469 PINMUX_IPSR_MSEL(IP13_11_9, ETH_TX_EN_B, SEL_ETH_1),
1471 PINMUX_IPSR_MODSEL_DATA(IP13_14_12, SSI_SDATA9, SEL_SSI9_0), 1470 PINMUX_IPSR_MSEL(IP13_14_12, SSI_SDATA9, SEL_SSI9_0),
1472 PINMUX_IPSR_MODSEL_DATA(IP13_14_12, SCIF2_TXD_B, SEL_SCIF2_1), 1471 PINMUX_IPSR_MSEL(IP13_14_12, SCIF2_TXD_B, SEL_SCIF2_1),
1473 PINMUX_IPSR_MODSEL_DATA(IP13_14_12, I2C3_SDA_E, SEL_I2C03_4), 1472 PINMUX_IPSR_MSEL(IP13_14_12, I2C3_SDA_E, SEL_I2C03_4),
1474 PINMUX_IPSR_DATA(IP13_14_12, VI1_DATA7), 1473 PINMUX_IPSR_DATA(IP13_14_12, VI1_DATA7),
1475 PINMUX_IPSR_DATA(IP13_14_12, ATADIR0_N), 1474 PINMUX_IPSR_DATA(IP13_14_12, ATADIR0_N),
1476 PINMUX_IPSR_MODSEL_DATA(IP13_14_12, ETH_MAGIC_B, SEL_ETH_1), 1475 PINMUX_IPSR_MSEL(IP13_14_12, ETH_MAGIC_B, SEL_ETH_1),
1477 PINMUX_IPSR_MODSEL_DATA(IP13_17_15, AUDIO_CLKA, SEL_ADG_0), 1476 PINMUX_IPSR_MSEL(IP13_17_15, AUDIO_CLKA, SEL_ADG_0),
1478 PINMUX_IPSR_MODSEL_DATA(IP13_17_15, I2C0_SCL_B, SEL_I2C00_1), 1477 PINMUX_IPSR_MSEL(IP13_17_15, I2C0_SCL_B, SEL_I2C00_1),
1479 PINMUX_IPSR_MODSEL_DATA(IP13_17_15, SCIFA4_RXD_D, SEL_SCIFA4_3), 1478 PINMUX_IPSR_MSEL(IP13_17_15, SCIFA4_RXD_D, SEL_SCIFA4_3),
1480 PINMUX_IPSR_DATA(IP13_17_15, VI1_CLKENB), 1479 PINMUX_IPSR_DATA(IP13_17_15, VI1_CLKENB),
1481 PINMUX_IPSR_MODSEL_DATA(IP13_17_15, TS_SDATA_C, SEL_TSIF0_2), 1480 PINMUX_IPSR_MSEL(IP13_17_15, TS_SDATA_C, SEL_TSIF0_2),
1482 PINMUX_IPSR_MODSEL_DATA(IP13_17_15, RIF0_SYNC_B, SEL_DR0_1), 1481 PINMUX_IPSR_MSEL(IP13_17_15, RIF0_SYNC_B, SEL_DR0_1),
1483 PINMUX_IPSR_MODSEL_DATA(IP13_17_15, ETH_TXD0_B, SEL_ETH_1), 1482 PINMUX_IPSR_MSEL(IP13_17_15, ETH_TXD0_B, SEL_ETH_1),
1484 PINMUX_IPSR_MODSEL_DATA(IP13_20_18, AUDIO_CLKB, SEL_ADG_0), 1483 PINMUX_IPSR_MSEL(IP13_20_18, AUDIO_CLKB, SEL_ADG_0),
1485 PINMUX_IPSR_MODSEL_DATA(IP13_20_18, I2C0_SDA_B, SEL_I2C00_1), 1484 PINMUX_IPSR_MSEL(IP13_20_18, I2C0_SDA_B, SEL_I2C00_1),
1486 PINMUX_IPSR_MODSEL_DATA(IP13_20_18, SCIFA4_TXD_D, SEL_SCIFA4_3), 1485 PINMUX_IPSR_MSEL(IP13_20_18, SCIFA4_TXD_D, SEL_SCIFA4_3),
1487 PINMUX_IPSR_DATA(IP13_20_18, VI1_FIELD), 1486 PINMUX_IPSR_DATA(IP13_20_18, VI1_FIELD),
1488 PINMUX_IPSR_MODSEL_DATA(IP13_20_18, TS_SCK_C, SEL_TSIF0_2), 1487 PINMUX_IPSR_MSEL(IP13_20_18, TS_SCK_C, SEL_TSIF0_2),
1489 PINMUX_IPSR_MODSEL_DATA(IP13_20_18, RIF0_CLK_B, SEL_DR0_1), 1488 PINMUX_IPSR_MSEL(IP13_20_18, RIF0_CLK_B, SEL_DR0_1),
1490 PINMUX_IPSR_MODSEL_DATA(IP13_20_18, BPFCLK_E, SEL_DARC_4), 1489 PINMUX_IPSR_MSEL(IP13_20_18, BPFCLK_E, SEL_DARC_4),
1491 PINMUX_IPSR_MODSEL_DATA(IP13_20_18, ETH_MDC_B, SEL_ETH_1), 1490 PINMUX_IPSR_MSEL(IP13_20_18, ETH_MDC_B, SEL_ETH_1),
1492 PINMUX_IPSR_MODSEL_DATA(IP13_23_21, AUDIO_CLKC, SEL_ADG_0), 1491 PINMUX_IPSR_MSEL(IP13_23_21, AUDIO_CLKC, SEL_ADG_0),
1493 PINMUX_IPSR_MODSEL_DATA(IP13_23_21, I2C4_SCL_B, SEL_I2C04_1), 1492 PINMUX_IPSR_MSEL(IP13_23_21, I2C4_SCL_B, SEL_I2C04_1),
1494 PINMUX_IPSR_MODSEL_DATA(IP13_23_21, SCIFA5_RXD_D, SEL_SCIFA5_3), 1493 PINMUX_IPSR_MSEL(IP13_23_21, SCIFA5_RXD_D, SEL_SCIFA5_3),
1495 PINMUX_IPSR_DATA(IP13_23_21, VI1_HSYNC_N), 1494 PINMUX_IPSR_DATA(IP13_23_21, VI1_HSYNC_N),
1496 PINMUX_IPSR_MODSEL_DATA(IP13_23_21, TS_SDEN_C, SEL_TSIF0_2), 1495 PINMUX_IPSR_MSEL(IP13_23_21, TS_SDEN_C, SEL_TSIF0_2),
1497 PINMUX_IPSR_MODSEL_DATA(IP13_23_21, RIF0_D0_B, SEL_DR0_1), 1496 PINMUX_IPSR_MSEL(IP13_23_21, RIF0_D0_B, SEL_DR0_1),
1498 PINMUX_IPSR_MODSEL_DATA(IP13_23_21, FMCLK_E, SEL_DARC_4), 1497 PINMUX_IPSR_MSEL(IP13_23_21, FMCLK_E, SEL_DARC_4),
1499 PINMUX_IPSR_MODSEL_DATA(IP13_23_21, RDS_CLK_D, SEL_RDS_3), 1498 PINMUX_IPSR_MSEL(IP13_23_21, RDS_CLK_D, SEL_RDS_3),
1500 PINMUX_IPSR_MODSEL_DATA(IP13_26_24, AUDIO_CLKOUT, SEL_ADG_0), 1499 PINMUX_IPSR_MSEL(IP13_26_24, AUDIO_CLKOUT, SEL_ADG_0),
1501 PINMUX_IPSR_MODSEL_DATA(IP13_26_24, I2C4_SDA_B, SEL_I2C04_1), 1500 PINMUX_IPSR_MSEL(IP13_26_24, I2C4_SDA_B, SEL_I2C04_1),
1502 PINMUX_IPSR_MODSEL_DATA(IP13_26_24, SCIFA5_TXD_D, SEL_SCIFA5_3), 1501 PINMUX_IPSR_MSEL(IP13_26_24, SCIFA5_TXD_D, SEL_SCIFA5_3),
1503 PINMUX_IPSR_DATA(IP13_26_24, VI1_VSYNC_N), 1502 PINMUX_IPSR_DATA(IP13_26_24, VI1_VSYNC_N),
1504 PINMUX_IPSR_MODSEL_DATA(IP13_26_24, TS_SPSYNC_C, SEL_TSIF0_2), 1503 PINMUX_IPSR_MSEL(IP13_26_24, TS_SPSYNC_C, SEL_TSIF0_2),
1505 PINMUX_IPSR_MODSEL_DATA(IP13_26_24, RIF0_D1_B, SEL_DR1_1), 1504 PINMUX_IPSR_MSEL(IP13_26_24, RIF0_D1_B, SEL_DR1_1),
1506 PINMUX_IPSR_MODSEL_DATA(IP13_26_24, FMIN_E, SEL_DARC_4), 1505 PINMUX_IPSR_MSEL(IP13_26_24, FMIN_E, SEL_DARC_4),
1507 PINMUX_IPSR_MODSEL_DATA(IP13_26_24, RDS_DATA_D, SEL_RDS_3), 1506 PINMUX_IPSR_MSEL(IP13_26_24, RDS_DATA_D, SEL_RDS_3),
1508}; 1507};
1509 1508
1510static const struct sh_pfc_pin pinmux_pins[] = { 1509static const struct sh_pfc_pin pinmux_pins[] = {
@@ -2197,13 +2196,6 @@ static const unsigned int scif0_data_pins[] = {
2197static const unsigned int scif0_data_mux[] = { 2196static const unsigned int scif0_data_mux[] = {
2198 SCIF0_RXD_MARK, SCIF0_TXD_MARK, 2197 SCIF0_RXD_MARK, SCIF0_TXD_MARK,
2199}; 2198};
2200static const unsigned int scif0_clk_pins[] = {
2201 /* SCK */
2202 RCAR_GP_PIN(1, 23),
2203};
2204static const unsigned int scif0_clk_mux[] = {
2205 SCIF_CLK_MARK,
2206};
2207static const unsigned int scif0_data_b_pins[] = { 2199static const unsigned int scif0_data_b_pins[] = {
2208 /* RX, TX */ 2200 /* RX, TX */
2209 RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 12), 2201 RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 12),
@@ -2211,13 +2203,6 @@ static const unsigned int scif0_data_b_pins[] = {
2211static const unsigned int scif0_data_b_mux[] = { 2203static const unsigned int scif0_data_b_mux[] = {
2212 SCIF0_RXD_B_MARK, SCIF0_TXD_B_MARK, 2204 SCIF0_RXD_B_MARK, SCIF0_TXD_B_MARK,
2213}; 2205};
2214static const unsigned int scif0_clk_b_pins[] = {
2215 /* SCK */
2216 RCAR_GP_PIN(3, 29),
2217};
2218static const unsigned int scif0_clk_b_mux[] = {
2219 SCIF_CLK_B_MARK,
2220};
2221static const unsigned int scif0_data_c_pins[] = { 2206static const unsigned int scif0_data_c_pins[] = {
2222 /* RX, TX */ 2207 /* RX, TX */
2223 RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31), 2208 RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31),
@@ -2788,6 +2773,146 @@ static const unsigned int usb1_mux[] = {
2788 USB1_PWEN_MARK, 2773 USB1_PWEN_MARK,
2789 USB1_OVC_MARK, 2774 USB1_OVC_MARK,
2790}; 2775};
2776/* - VIN0 ------------------------------------------------------------------- */
2777static const union vin_data vin0_data_pins = {
2778 .data24 = {
2779 /* B */
2780 RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2),
2781 RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
2782 RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6),
2783 RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 8),
2784 /* G */
2785 RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 14),
2786 RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
2787 RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 18),
2788 RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 20),
2789 /* R */
2790 RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 22),
2791 RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 24),
2792 RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26),
2793 RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28),
2794 },
2795};
2796static const union vin_data vin0_data_mux = {
2797 .data24 = {
2798 /* B */
2799 VI0_DATA0_VI0_B0_MARK, VI0_DATA1_VI0_B1_MARK,
2800 VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
2801 VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
2802 VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
2803 /* G */
2804 VI0_G0_MARK, VI0_G1_MARK,
2805 VI0_G2_MARK, VI0_G3_MARK,
2806 VI0_G4_MARK, VI0_G5_MARK,
2807 VI0_G6_MARK, VI0_G7_MARK,
2808 /* R */
2809 VI0_R0_MARK, VI0_R1_MARK,
2810 VI0_R2_MARK, VI0_R3_MARK,
2811 VI0_R4_MARK, VI0_R5_MARK,
2812 VI0_R6_MARK, VI0_R7_MARK,
2813 },
2814};
2815static const unsigned int vin0_data18_pins[] = {
2816 /* B */
2817 RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
2818 RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6),
2819 RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 8),
2820 /* G */
2821 RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
2822 RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 18),
2823 RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 20),
2824 /* R */
2825 RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 24),
2826 RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26),
2827 RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28),
2828};
2829static const unsigned int vin0_data18_mux[] = {
2830 /* B */
2831 VI0_DATA2_VI0_B2_MARK, VI0_DATA3_VI0_B3_MARK,
2832 VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
2833 VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
2834 /* G */
2835 VI0_G2_MARK, VI0_G3_MARK,
2836 VI0_G4_MARK, VI0_G5_MARK,
2837 VI0_G6_MARK, VI0_G7_MARK,
2838 /* R */
2839 VI0_R2_MARK, VI0_R3_MARK,
2840 VI0_R4_MARK, VI0_R5_MARK,
2841 VI0_R6_MARK, VI0_R7_MARK,
2842};
2843static const unsigned int vin0_sync_pins[] = {
2844 RCAR_GP_PIN(3, 11), /* HSYNC */
2845 RCAR_GP_PIN(3, 12), /* VSYNC */
2846};
2847static const unsigned int vin0_sync_mux[] = {
2848 VI0_HSYNC_N_MARK,
2849 VI0_VSYNC_N_MARK,
2850};
2851static const unsigned int vin0_field_pins[] = {
2852 RCAR_GP_PIN(3, 10),
2853};
2854static const unsigned int vin0_field_mux[] = {
2855 VI0_FIELD_MARK,
2856};
2857static const unsigned int vin0_clkenb_pins[] = {
2858 RCAR_GP_PIN(3, 9),
2859};
2860static const unsigned int vin0_clkenb_mux[] = {
2861 VI0_CLKENB_MARK,
2862};
2863static const unsigned int vin0_clk_pins[] = {
2864 RCAR_GP_PIN(3, 0),
2865};
2866static const unsigned int vin0_clk_mux[] = {
2867 VI0_CLK_MARK,
2868};
2869/* - VIN1 ------------------------------------------------------------------- */
2870static const union vin_data vin1_data_pins = {
2871 .data12 = {
2872 RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13),
2873 RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
2874 RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 17),
2875 RCAR_GP_PIN(5, 18), RCAR_GP_PIN(5, 19),
2876 RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 11),
2877 RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
2878 },
2879};
2880static const union vin_data vin1_data_mux = {
2881 .data12 = {
2882 VI1_DATA0_MARK, VI1_DATA1_MARK,
2883 VI1_DATA2_MARK, VI1_DATA3_MARK,
2884 VI1_DATA4_MARK, VI1_DATA5_MARK,
2885 VI1_DATA6_MARK, VI1_DATA7_MARK,
2886 VI1_DATA8_MARK, VI1_DATA9_MARK,
2887 VI1_DATA10_MARK, VI1_DATA11_MARK,
2888 },
2889};
2890static const unsigned int vin1_sync_pins[] = {
2891 RCAR_GP_PIN(5, 22), /* HSYNC */
2892 RCAR_GP_PIN(5, 23), /* VSYNC */
2893};
2894static const unsigned int vin1_sync_mux[] = {
2895 VI1_HSYNC_N_MARK,
2896 VI1_VSYNC_N_MARK,
2897};
2898static const unsigned int vin1_field_pins[] = {
2899 RCAR_GP_PIN(5, 21),
2900};
2901static const unsigned int vin1_field_mux[] = {
2902 VI1_FIELD_MARK,
2903};
2904static const unsigned int vin1_clkenb_pins[] = {
2905 RCAR_GP_PIN(5, 20),
2906};
2907static const unsigned int vin1_clkenb_mux[] = {
2908 VI1_CLKENB_MARK,
2909};
2910static const unsigned int vin1_clk_pins[] = {
2911 RCAR_GP_PIN(5, 11),
2912};
2913static const unsigned int vin1_clk_mux[] = {
2914 VI1_CLK_MARK,
2915};
2791 2916
2792static const struct sh_pfc_pin_group pinmux_groups[] = { 2917static const struct sh_pfc_pin_group pinmux_groups[] = {
2793 SH_PFC_PIN_GROUP(eth_link), 2918 SH_PFC_PIN_GROUP(eth_link),
@@ -2884,9 +3009,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
2884 SH_PFC_PIN_GROUP(qspi_data2), 3009 SH_PFC_PIN_GROUP(qspi_data2),
2885 SH_PFC_PIN_GROUP(qspi_data4), 3010 SH_PFC_PIN_GROUP(qspi_data4),
2886 SH_PFC_PIN_GROUP(scif0_data), 3011 SH_PFC_PIN_GROUP(scif0_data),
2887 SH_PFC_PIN_GROUP(scif0_clk),
2888 SH_PFC_PIN_GROUP(scif0_data_b), 3012 SH_PFC_PIN_GROUP(scif0_data_b),
2889 SH_PFC_PIN_GROUP(scif0_clk_b),
2890 SH_PFC_PIN_GROUP(scif0_data_c), 3013 SH_PFC_PIN_GROUP(scif0_data_c),
2891 SH_PFC_PIN_GROUP(scif0_data_d), 3014 SH_PFC_PIN_GROUP(scif0_data_d),
2892 SH_PFC_PIN_GROUP(scif1_data), 3015 SH_PFC_PIN_GROUP(scif1_data),
@@ -2965,6 +3088,24 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
2965 SH_PFC_PIN_GROUP(sdhi2_wp), 3088 SH_PFC_PIN_GROUP(sdhi2_wp),
2966 SH_PFC_PIN_GROUP(usb0), 3089 SH_PFC_PIN_GROUP(usb0),
2967 SH_PFC_PIN_GROUP(usb1), 3090 SH_PFC_PIN_GROUP(usb1),
3091 VIN_DATA_PIN_GROUP(vin0_data, 24),
3092 VIN_DATA_PIN_GROUP(vin0_data, 20),
3093 SH_PFC_PIN_GROUP(vin0_data18),
3094 VIN_DATA_PIN_GROUP(vin0_data, 16),
3095 VIN_DATA_PIN_GROUP(vin0_data, 12),
3096 VIN_DATA_PIN_GROUP(vin0_data, 10),
3097 VIN_DATA_PIN_GROUP(vin0_data, 8),
3098 SH_PFC_PIN_GROUP(vin0_sync),
3099 SH_PFC_PIN_GROUP(vin0_field),
3100 SH_PFC_PIN_GROUP(vin0_clkenb),
3101 SH_PFC_PIN_GROUP(vin0_clk),
3102 VIN_DATA_PIN_GROUP(vin1_data, 12),
3103 VIN_DATA_PIN_GROUP(vin1_data, 10),
3104 VIN_DATA_PIN_GROUP(vin1_data, 8),
3105 SH_PFC_PIN_GROUP(vin1_sync),
3106 SH_PFC_PIN_GROUP(vin1_field),
3107 SH_PFC_PIN_GROUP(vin1_clkenb),
3108 SH_PFC_PIN_GROUP(vin1_clk),
2968}; 3109};
2969 3110
2970static const char * const eth_groups[] = { 3111static const char * const eth_groups[] = {
@@ -3107,9 +3248,7 @@ static const char * const qspi_groups[] = {
3107 3248
3108static const char * const scif0_groups[] = { 3249static const char * const scif0_groups[] = {
3109 "scif0_data", 3250 "scif0_data",
3110 "scif0_clk",
3111 "scif0_data_b", 3251 "scif0_data_b",
3112 "scif0_clk_b",
3113 "scif0_data_c", 3252 "scif0_data_c",
3114 "scif0_data_d", 3253 "scif0_data_d",
3115}; 3254};
@@ -3247,6 +3386,30 @@ static const char * const usb1_groups[] = {
3247 "usb1", 3386 "usb1",
3248}; 3387};
3249 3388
3389static const char * const vin0_groups[] = {
3390 "vin0_data24",
3391 "vin0_data20",
3392 "vin0_data18",
3393 "vin0_data16",
3394 "vin0_data12",
3395 "vin0_data10",
3396 "vin0_data8",
3397 "vin0_sync",
3398 "vin0_field",
3399 "vin0_clkenb",
3400 "vin0_clk",
3401};
3402
3403static const char * const vin1_groups[] = {
3404 "vin1_data12",
3405 "vin1_data10",
3406 "vin1_data8",
3407 "vin1_sync",
3408 "vin1_field",
3409 "vin1_clkenb",
3410 "vin1_clk",
3411};
3412
3250static const struct sh_pfc_function pinmux_functions[] = { 3413static const struct sh_pfc_function pinmux_functions[] = {
3251 SH_PFC_FUNCTION(eth), 3414 SH_PFC_FUNCTION(eth),
3252 SH_PFC_FUNCTION(hscif0), 3415 SH_PFC_FUNCTION(hscif0),
@@ -3283,6 +3446,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
3283 SH_PFC_FUNCTION(sdhi2), 3446 SH_PFC_FUNCTION(sdhi2),
3284 SH_PFC_FUNCTION(usb0), 3447 SH_PFC_FUNCTION(usb0),
3285 SH_PFC_FUNCTION(usb1), 3448 SH_PFC_FUNCTION(usb1),
3449 SH_PFC_FUNCTION(vin0),
3450 SH_PFC_FUNCTION(vin1),
3286}; 3451};
3287 3452
3288static const struct pinmux_cfg_reg pinmux_config_regs[] = { 3453static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4232,6 +4397,6 @@ const struct sh_pfc_soc_info r8a7794_pinmux_info = {
4232 4397
4233 .cfg_regs = pinmux_config_regs, 4398 .cfg_regs = pinmux_config_regs,
4234 4399
4235 .gpio_data = pinmux_data, 4400 .pinmux_data = pinmux_data,
4236 .gpio_data_size = ARRAY_SIZE(pinmux_data), 4401 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
4237}; 4402};
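The vin0/vin1 groups added above share a single pin table across several bus widths: the widest list is stored in a union so the narrower data groups reuse its leading entries, and VIN_DATA_PIN_GROUP() (defined in sh_pfc.h, not part of this hunk) selects the matching union member. Below is a minimal standalone sketch of that overlay pattern, not part of the commit; the names (vin_data_demo, demo_pins) are invented for illustration and it depends on no driver headers.

#include <stdio.h>

/* Overlay pattern used by the vin0/vin1 tables above: because all union
 * members share the same storage, a narrower width is simply a prefix of
 * the widest array. */
union vin_data_demo {
	unsigned int data24[24];
	unsigned int data12[12];
	unsigned int data8[8];
};

static const union vin_data_demo demo_pins = {
	.data24 = {  0,  1,  2,  3,  4,  5,  6,  7,
		     8,  9, 10, 11, 12, 13, 14, 15,
		    16, 17, 18, 19, 20, 21, 22, 23 },
};

int main(void)
{
	unsigned int i;

	/* The 8-bit group is the first 8 entries of the 24-bit list. */
	for (i = 0; i < sizeof(demo_pins.data8) / sizeof(demo_pins.data8[0]); i++)
		printf("vin data8 pin %u\n", demo_pins.data8[i]);
	return 0;
}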
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
new file mode 100644
index 000000000000..7ddb2adfc5a5
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -0,0 +1,2816 @@
1/*
2 * R-Car Gen3 processor support - PFC hardware block.
3 *
4 * Copyright (C) 2015 Renesas Electronics Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 */
10
11#include <linux/kernel.h>
12
13#include "core.h"
14#include "sh_pfc.h"
15
16#define PORT_GP_3(bank, fn, sfx) \
17 PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
18 PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx)
19
20#define PORT_GP_14(bank, fn, sfx) \
21 PORT_GP_3(bank, fn, sfx), \
22 PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
23 PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
24 PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
25 PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
26 PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
27 PORT_GP_1(bank, 14, fn, sfx)
28
29#define PORT_GP_15(bank, fn, sfx) \
30 PORT_GP_14(bank, fn, sfx), PORT_GP_1(bank, 15, fn, sfx)
31
32#define PORT_GP_17(bank, fn, sfx) \
33 PORT_GP_15(bank, fn, sfx), \
34 PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx)
35
36#define PORT_GP_25(bank, fn, sfx) \
37 PORT_GP_17(bank, fn, sfx), \
38 PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
39 PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
40 PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
41 PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx)
42
43#define PORT_GP_27(bank, fn, sfx) \
44 PORT_GP_25(bank, fn, sfx), \
45 PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx)
46
47#define CPU_ALL_PORT(fn, sfx) \
48 PORT_GP_15(0, fn, sfx), \
49 PORT_GP_27(1, fn, sfx), \
50 PORT_GP_14(2, fn, sfx), \
51 PORT_GP_15(3, fn, sfx), \
52 PORT_GP_17(4, fn, sfx), \
53 PORT_GP_25(5, fn, sfx), \
54 PORT_GP_32(6, fn, sfx), \
55 PORT_GP_3(7, fn, sfx)
56/*
57 * F_() : just information
58 * FM() : macro for FN_xxx / xxx_MARK
59 */
60
61/* GPSR0 */
62#define GPSR0_15 F_(D15, IP7_11_8)
63#define GPSR0_14 F_(D14, IP7_7_4)
64#define GPSR0_13 F_(D13, IP7_3_0)
65#define GPSR0_12 F_(D12, IP6_31_28)
66#define GPSR0_11 F_(D11, IP6_27_24)
67#define GPSR0_10 F_(D10, IP6_23_20)
68#define GPSR0_9 F_(D9, IP6_19_16)
69#define GPSR0_8 F_(D8, IP6_15_12)
70#define GPSR0_7 F_(D7, IP6_11_8)
71#define GPSR0_6 F_(D6, IP6_7_4)
72#define GPSR0_5 F_(D5, IP6_3_0)
73#define GPSR0_4 F_(D4, IP5_31_28)
74#define GPSR0_3 F_(D3, IP5_27_24)
75#define GPSR0_2 F_(D2, IP5_23_20)
76#define GPSR0_1 F_(D1, IP5_19_16)
77#define GPSR0_0 F_(D0, IP5_15_12)
78
79/* GPSR1 */
80#define GPSR1_27 F_(EX_WAIT0_A, IP5_11_8)
81#define GPSR1_26 F_(WE1_N, IP5_7_4)
82#define GPSR1_25 F_(WE0_N, IP5_3_0)
83#define GPSR1_24 F_(RD_WR_N, IP4_31_28)
84#define GPSR1_23 F_(RD_N, IP4_27_24)
85#define GPSR1_22 F_(BS_N, IP4_23_20)
86#define GPSR1_21 F_(CS1_N_A26, IP4_19_16)
87#define GPSR1_20 F_(CS0_N, IP4_15_12)
88#define GPSR1_19 F_(A19, IP4_11_8)
89#define GPSR1_18 F_(A18, IP4_7_4)
90#define GPSR1_17 F_(A17, IP4_3_0)
91#define GPSR1_16 F_(A16, IP3_31_28)
92#define GPSR1_15 F_(A15, IP3_27_24)
93#define GPSR1_14 F_(A14, IP3_23_20)
94#define GPSR1_13 F_(A13, IP3_19_16)
95#define GPSR1_12 F_(A12, IP3_15_12)
96#define GPSR1_11 F_(A11, IP3_11_8)
97#define GPSR1_10 F_(A10, IP3_7_4)
98#define GPSR1_9 F_(A9, IP3_3_0)
99#define GPSR1_8 F_(A8, IP2_31_28)
100#define GPSR1_7 F_(A7, IP2_27_24)
101#define GPSR1_6 F_(A6, IP2_23_20)
102#define GPSR1_5 F_(A5, IP2_19_16)
103#define GPSR1_4 F_(A4, IP2_15_12)
104#define GPSR1_3 F_(A3, IP2_11_8)
105#define GPSR1_2 F_(A2, IP2_7_4)
106#define GPSR1_1 F_(A1, IP2_3_0)
107#define GPSR1_0 F_(A0, IP1_31_28)
108
109/* GPSR2 */
110#define GPSR2_14 F_(AVB_AVTP_CAPTURE_A, IP0_23_20)
111#define GPSR2_13 F_(AVB_AVTP_MATCH_A, IP0_19_16)
112#define GPSR2_12 F_(AVB_LINK, IP0_15_12)
113#define GPSR2_11 F_(AVB_PHY_INT, IP0_11_8)
114#define GPSR2_10 F_(AVB_MAGIC, IP0_7_4)
115#define GPSR2_9 F_(AVB_MDC, IP0_3_0)
116#define GPSR2_8 F_(PWM2_A, IP1_27_24)
117#define GPSR2_7 F_(PWM1_A, IP1_23_20)
118#define GPSR2_6 F_(PWM0, IP1_19_16)
119#define GPSR2_5 F_(IRQ5, IP1_15_12)
120#define GPSR2_4 F_(IRQ4, IP1_11_8)
121#define GPSR2_3 F_(IRQ3, IP1_7_4)
122#define GPSR2_2 F_(IRQ2, IP1_3_0)
123#define GPSR2_1 F_(IRQ1, IP0_31_28)
124#define GPSR2_0 F_(IRQ0, IP0_27_24)
125
126/* GPSR3 */
127#define GPSR3_15 F_(SD1_WP, IP10_23_20)
128#define GPSR3_14 F_(SD1_CD, IP10_19_16)
129#define GPSR3_13 F_(SD0_WP, IP10_15_12)
130#define GPSR3_12 F_(SD0_CD, IP10_11_8)
131#define GPSR3_11 F_(SD1_DAT3, IP8_31_28)
132#define GPSR3_10 F_(SD1_DAT2, IP8_27_24)
133#define GPSR3_9 F_(SD1_DAT1, IP8_23_20)
134#define GPSR3_8 F_(SD1_DAT0, IP8_19_16)
135#define GPSR3_7 F_(SD1_CMD, IP8_15_12)
136#define GPSR3_6 F_(SD1_CLK, IP8_11_8)
137#define GPSR3_5 F_(SD0_DAT3, IP8_7_4)
138#define GPSR3_4 F_(SD0_DAT2, IP8_3_0)
139#define GPSR3_3 F_(SD0_DAT1, IP7_31_28)
140#define GPSR3_2 F_(SD0_DAT0, IP7_27_24)
141#define GPSR3_1 F_(SD0_CMD, IP7_23_20)
142#define GPSR3_0 F_(SD0_CLK, IP7_19_16)
143
144/* GPSR4 */
145#define GPSR4_17 FM(SD3_DS)
146#define GPSR4_16 F_(SD3_DAT7, IP10_7_4)
147#define GPSR4_15 F_(SD3_DAT6, IP10_3_0)
148#define GPSR4_14 F_(SD3_DAT5, IP9_31_28)
149#define GPSR4_13 F_(SD3_DAT4, IP9_27_24)
150#define GPSR4_12 FM(SD3_DAT3)
151#define GPSR4_11 FM(SD3_DAT2)
152#define GPSR4_10 FM(SD3_DAT1)
153#define GPSR4_9 FM(SD3_DAT0)
154#define GPSR4_8 FM(SD3_CMD)
155#define GPSR4_7 FM(SD3_CLK)
156#define GPSR4_6 F_(SD2_DS, IP9_23_20)
157#define GPSR4_5 F_(SD2_DAT3, IP9_19_16)
158#define GPSR4_4 F_(SD2_DAT2, IP9_15_12)
159#define GPSR4_3 F_(SD2_DAT1, IP9_11_8)
160#define GPSR4_2 F_(SD2_DAT0, IP9_7_4)
161#define GPSR4_1 FM(SD2_CMD)
162#define GPSR4_0 F_(SD2_CLK, IP9_3_0)
163
164/* GPSR5 */
165#define GPSR5_25 F_(MLB_DAT, IP13_19_16)
166#define GPSR5_24 F_(MLB_SIG, IP13_15_12)
167#define GPSR5_23 F_(MLB_CLK, IP13_11_8)
168#define GPSR5_22 FM(MSIOF0_RXD)
169#define GPSR5_21 F_(MSIOF0_SS2, IP13_7_4)
170#define GPSR5_20 FM(MSIOF0_TXD)
171#define GPSR5_19 F_(MSIOF0_SS1, IP13_3_0)
172#define GPSR5_18 F_(MSIOF0_SYNC, IP12_31_28)
173#define GPSR5_17 FM(MSIOF0_SCK)
174#define GPSR5_16 F_(HRTS0_N, IP12_27_24)
175#define GPSR5_15 F_(HCTS0_N, IP12_23_20)
176#define GPSR5_14 F_(HTX0, IP12_19_16)
177#define GPSR5_13 F_(HRX0, IP12_15_12)
178#define GPSR5_12 F_(HSCK0, IP12_11_8)
179#define GPSR5_11 F_(RX2_A, IP12_7_4)
180#define GPSR5_10 F_(TX2_A, IP12_3_0)
181#define GPSR5_9 F_(SCK2, IP11_31_28)
182#define GPSR5_8 F_(RTS1_N_TANS, IP11_27_24)
183#define GPSR5_7 F_(CTS1_N, IP11_23_20)
184#define GPSR5_6 F_(TX1_A, IP11_19_16)
185#define GPSR5_5 F_(RX1_A, IP11_15_12)
186#define GPSR5_4 F_(RTS0_N_TANS, IP11_11_8)
187#define GPSR5_3 F_(CTS0_N, IP11_7_4)
188#define GPSR5_2 F_(TX0, IP11_3_0)
189#define GPSR5_1 F_(RX0, IP10_31_28)
190#define GPSR5_0 F_(SCK0, IP10_27_24)
191
192/* GPSR6 */
193#define GPSR6_31 F_(USB31_OVC, IP17_7_4)
194#define GPSR6_30 F_(USB31_PWEN, IP17_3_0)
195#define GPSR6_29 F_(USB30_OVC, IP16_31_28)
196#define GPSR6_28 F_(USB30_PWEN, IP16_27_24)
197#define GPSR6_27 F_(USB1_OVC, IP16_23_20)
198#define GPSR6_26 F_(USB1_PWEN, IP16_19_16)
199#define GPSR6_25 F_(USB0_OVC, IP16_15_12)
200#define GPSR6_24 F_(USB0_PWEN, IP16_11_8)
201#define GPSR6_23 F_(AUDIO_CLKB_B, IP16_7_4)
202#define GPSR6_22 F_(AUDIO_CLKA_A, IP16_3_0)
203#define GPSR6_21 F_(SSI_SDATA9_A, IP15_31_28)
204#define GPSR6_20 F_(SSI_SDATA8, IP15_27_24)
205#define GPSR6_19 F_(SSI_SDATA7, IP15_23_20)
206#define GPSR6_18 F_(SSI_WS78, IP15_19_16)
207#define GPSR6_17 F_(SSI_SCK78, IP15_15_12)
208#define GPSR6_16 F_(SSI_SDATA6, IP15_11_8)
209#define GPSR6_15 F_(SSI_WS6, IP15_7_4)
210#define GPSR6_14 F_(SSI_SCK6, IP15_3_0)
211#define GPSR6_13 FM(SSI_SDATA5)
212#define GPSR6_12 FM(SSI_WS5)
213#define GPSR6_11 FM(SSI_SCK5)
214#define GPSR6_10 F_(SSI_SDATA4, IP14_31_28)
215#define GPSR6_9 F_(SSI_WS4, IP14_27_24)
216#define GPSR6_8 F_(SSI_SCK4, IP14_23_20)
217#define GPSR6_7 F_(SSI_SDATA3, IP14_19_16)
218#define GPSR6_6 F_(SSI_WS34, IP14_15_12)
219#define GPSR6_5 F_(SSI_SCK34, IP14_11_8)
220#define GPSR6_4 F_(SSI_SDATA2_A, IP14_7_4)
221#define GPSR6_3 F_(SSI_SDATA1_A, IP14_3_0)
222#define GPSR6_2 F_(SSI_SDATA0, IP13_31_28)
223#define GPSR6_1 F_(SSI_WS0129, IP13_27_24)
224#define GPSR6_0 F_(SSI_SCK0129, IP13_23_20)
225
226/* GPSR7 */
227#define GPSR7_3 FM(HDMI1_CEC)
228#define GPSR7_2 FM(HDMI0_CEC)
229#define GPSR7_1 FM(AVS2)
230#define GPSR7_0 FM(AVS1)
231
232
233/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
234#define IP0_3_0 FM(AVB_MDC) F_(0, 0) FM(MSIOF2_SS2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
235#define IP0_7_4 FM(AVB_MAGIC) F_(0, 0) FM(MSIOF2_SS1_C) FM(SCK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
236#define IP0_11_8 FM(AVB_PHY_INT) F_(0, 0) FM(MSIOF2_SYNC_C) FM(RX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
237#define IP0_15_12 FM(AVB_LINK) F_(0, 0) FM(MSIOF2_SCK_C) FM(TX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
238#define IP0_19_16 FM(AVB_AVTP_MATCH_A) F_(0, 0) FM(MSIOF2_RXD_C) FM(CTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
239#define IP0_23_20 FM(AVB_AVTP_CAPTURE_A) F_(0, 0) FM(MSIOF2_TXD_C) FM(RTS4_N_TANS_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
240#define IP0_27_24 FM(IRQ0) FM(QPOLB) F_(0, 0) FM(DU_CDE) FM(VI4_DATA0_B) FM(CAN0_TX_B) FM(CANFD0_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
241#define IP0_31_28 FM(IRQ1) FM(QPOLA) F_(0, 0) FM(DU_DISP) FM(VI4_DATA1_B) FM(CAN0_RX_B) FM(CANFD0_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
242#define IP1_3_0 FM(IRQ2) FM(QCPV_QDE) F_(0, 0) FM(DU_EXODDF_DU_ODDF_DISP_CDE) FM(VI4_DATA2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
243#define IP1_7_4 FM(IRQ3) FM(QSTVB_QVE) FM(A25) FM(DU_DOTCLKOUT1) FM(VI4_DATA3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
244#define IP1_11_8 FM(IRQ4) FM(QSTH_QHS) FM(A24) FM(DU_EXHSYNC_DU_HSYNC) FM(VI4_DATA4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
245#define IP1_15_12 FM(IRQ5) FM(QSTB_QHE) FM(A23) FM(DU_EXVSYNC_DU_VSYNC) FM(VI4_DATA5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM6_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
246#define IP1_19_16 FM(PWM0) FM(AVB_AVTP_PPS)FM(A22) F_(0, 0) FM(VI4_DATA6_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(IECLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
247#define IP1_23_20 FM(PWM1_A) F_(0, 0) FM(A21) FM(HRX3_D) FM(VI4_DATA7_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(IERX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
248#define IP1_27_24 FM(PWM2_A) F_(0, 0) FM(A20) FM(HTX3_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(IETX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
249#define IP1_31_28 FM(A0) FM(LCDOUT16) FM(MSIOF3_SYNC_B) F_(0, 0) FM(VI4_DATA8) F_(0, 0) FM(DU_DB0) F_(0, 0) F_(0, 0) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
250#define IP2_3_0 FM(A1) FM(LCDOUT17) FM(MSIOF3_TXD_B) F_(0, 0) FM(VI4_DATA9) F_(0, 0) FM(DU_DB1) F_(0, 0) F_(0, 0) FM(PWM4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
251#define IP2_7_4 FM(A2) FM(LCDOUT18) FM(MSIOF3_SCK_B) F_(0, 0) FM(VI4_DATA10) F_(0, 0) FM(DU_DB2) F_(0, 0) F_(0, 0) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
252#define IP2_11_8 FM(A3) FM(LCDOUT19) FM(MSIOF3_RXD_B) F_(0, 0) FM(VI4_DATA11) F_(0, 0) FM(DU_DB3) F_(0, 0) F_(0, 0) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
253
254/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
255#define IP2_15_12 FM(A4) FM(LCDOUT20) FM(MSIOF3_SS1_B) F_(0, 0) FM(VI4_DATA12) FM(VI5_DATA12) FM(DU_DB4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
256#define IP2_19_16 FM(A5) FM(LCDOUT21) FM(MSIOF3_SS2_B) FM(SCK4_B) FM(VI4_DATA13) FM(VI5_DATA13) FM(DU_DB5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
257#define IP2_23_20 FM(A6) FM(LCDOUT22) FM(MSIOF2_SS1_A) FM(RX4_B) FM(VI4_DATA14) FM(VI5_DATA14) FM(DU_DB6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
258#define IP2_27_24 FM(A7) FM(LCDOUT23) FM(MSIOF2_SS2_A) FM(TX4_B) FM(VI4_DATA15) FM(VI5_DATA15) FM(DU_DB7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
259#define IP2_31_28 FM(A8) FM(RX3_B) FM(MSIOF2_SYNC_A) FM(HRX4_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(SDA6_A) FM(AVB_AVTP_MATCH_B) FM(PWM1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
260#define IP3_3_0 FM(A9) F_(0, 0) FM(MSIOF2_SCK_A) FM(CTS4_N_B) F_(0, 0) FM(VI5_VSYNC_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
261#define IP3_7_4 FM(A10) F_(0, 0) FM(MSIOF2_RXD_A) FM(RTS4_N_TANS_B) F_(0, 0) FM(VI5_HSYNC_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
262#define IP3_11_8 FM(A11) FM(TX3_B) FM(MSIOF2_TXD_A) FM(HTX4_B) FM(HSCK4) FM(VI5_FIELD) F_(0, 0) FM(SCL6_A) FM(AVB_AVTP_CAPTURE_B) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
263#define IP3_15_12 FM(A12) FM(LCDOUT12) FM(MSIOF3_SCK_C) F_(0, 0) FM(HRX4_A) FM(VI5_DATA8) FM(DU_DG4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
264#define IP3_19_16 FM(A13) FM(LCDOUT13) FM(MSIOF3_SYNC_C) F_(0, 0) FM(HTX4_A) FM(VI5_DATA9) FM(DU_DG5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
265#define IP3_23_20 FM(A14) FM(LCDOUT14) FM(MSIOF3_RXD_C) F_(0, 0) FM(HCTS4_N) FM(VI5_DATA10) FM(DU_DG6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
266#define IP3_27_24 FM(A15) FM(LCDOUT15) FM(MSIOF3_TXD_C) F_(0, 0) FM(HRTS4_N) FM(VI5_DATA11) FM(DU_DG7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
267#define IP3_31_28 FM(A16) FM(LCDOUT8) F_(0, 0) F_(0, 0) FM(VI4_FIELD) F_(0, 0) FM(DU_DG0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
268#define IP4_3_0 FM(A17) FM(LCDOUT9) F_(0, 0) F_(0, 0) FM(VI4_VSYNC_N) F_(0, 0) FM(DU_DG1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
269#define IP4_7_4 FM(A18) FM(LCDOUT10) F_(0, 0) F_(0, 0) FM(VI4_HSYNC_N) F_(0, 0) FM(DU_DG2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
270#define IP4_11_8 FM(A19) FM(LCDOUT11) F_(0, 0) F_(0, 0) FM(VI4_CLKENB) F_(0, 0) FM(DU_DG3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
271#define IP4_15_12 FM(CS0_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(VI5_CLKENB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
272#define IP4_19_16 FM(CS1_N_A26) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(VI5_CLK) F_(0, 0) FM(EX_WAIT0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
273#define IP4_23_20 FM(BS_N) FM(QSTVA_QVS) FM(MSIOF3_SCK_D) FM(SCK3) FM(HSCK3) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN1_TX) FM(CANFD1_TX) FM(IETX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
274#define IP4_27_24 FM(RD_N) F_(0, 0) FM(MSIOF3_SYNC_D) FM(RX3_A) FM(HRX3_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN0_TX_A) FM(CANFD0_TX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
275#define IP4_31_28 FM(RD_WR_N) F_(0, 0) FM(MSIOF3_RXD_D) FM(TX3_A) FM(HTX3_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(CAN0_RX_A) FM(CANFD0_RX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
276#define IP5_3_0 FM(WE0_N) F_(0, 0) FM(MSIOF3_TXD_D) FM(CTS3_N) FM(HCTS3_N) F_(0, 0) F_(0, 0) FM(SCL6_B) FM(CAN_CLK) F_(0, 0) FM(IECLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
277#define IP5_7_4 FM(WE1_N) F_(0, 0) FM(MSIOF3_SS1_D) FM(RTS3_N_TANS) FM(HRTS3_N) F_(0, 0) F_(0, 0) FM(SDA6_B) FM(CAN1_RX) FM(CANFD1_RX) FM(IERX_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
278#define IP5_11_8 FM(EX_WAIT0_A) FM(QCLK) F_(0, 0) F_(0, 0) FM(VI4_CLK) F_(0, 0) FM(DU_DOTCLKOUT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
279#define IP5_15_12 FM(D0) FM(MSIOF2_SS1_B)FM(MSIOF3_SCK_A) F_(0, 0) FM(VI4_DATA16) FM(VI5_DATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
280#define IP5_19_16 FM(D1) FM(MSIOF2_SS2_B)FM(MSIOF3_SYNC_A) F_(0, 0) FM(VI4_DATA17) FM(VI5_DATA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
281#define IP5_23_20 FM(D2) F_(0, 0) FM(MSIOF3_RXD_A) F_(0, 0) FM(VI4_DATA18) FM(VI5_DATA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
282#define IP5_27_24 FM(D3) F_(0, 0) FM(MSIOF3_TXD_A) F_(0, 0) FM(VI4_DATA19) FM(VI5_DATA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
283#define IP5_31_28 FM(D4) FM(MSIOF2_SCK_B)F_(0, 0) F_(0, 0) FM(VI4_DATA20) FM(VI5_DATA4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
284#define IP6_3_0 FM(D5) FM(MSIOF2_SYNC_B)F_(0, 0) F_(0, 0) FM(VI4_DATA21) FM(VI5_DATA5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
285#define IP6_7_4 FM(D6) FM(MSIOF2_RXD_B)F_(0, 0) F_(0, 0) FM(VI4_DATA22) FM(VI5_DATA6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
286#define IP6_11_8 FM(D7) FM(MSIOF2_TXD_B)F_(0, 0) F_(0, 0) FM(VI4_DATA23) FM(VI5_DATA7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
287#define IP6_15_12 FM(D8) FM(LCDOUT0) FM(MSIOF2_SCK_D) FM(SCK4_C) FM(VI4_DATA0_A) F_(0, 0) FM(DU_DR0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
288#define IP6_19_16 FM(D9) FM(LCDOUT1) FM(MSIOF2_SYNC_D) F_(0, 0) FM(VI4_DATA1_A) F_(0, 0) FM(DU_DR1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
289#define IP6_23_20 FM(D10) FM(LCDOUT2) FM(MSIOF2_RXD_D) FM(HRX3_B) FM(VI4_DATA2_A) FM(CTS4_N_C) FM(DU_DR2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
290#define IP6_27_24 FM(D11) FM(LCDOUT3) FM(MSIOF2_TXD_D) FM(HTX3_B) FM(VI4_DATA3_A) FM(RTS4_N_TANS_C)FM(DU_DR3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
291#define IP6_31_28 FM(D12) FM(LCDOUT4) FM(MSIOF2_SS1_D) FM(RX4_C) FM(VI4_DATA4_A) F_(0, 0) FM(DU_DR4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
292#define IP7_3_0 FM(D13) FM(LCDOUT5) FM(MSIOF2_SS2_D) FM(TX4_C) FM(VI4_DATA5_A) F_(0, 0) FM(DU_DR5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
293#define IP7_7_4 FM(D14) FM(LCDOUT6) FM(MSIOF3_SS1_A) FM(HRX3_C) FM(VI4_DATA6_A) F_(0, 0) FM(DU_DR6) FM(SCL6_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
294#define IP7_11_8 FM(D15) FM(LCDOUT7) FM(MSIOF3_SS2_A) FM(HTX3_C) FM(VI4_DATA7_A) F_(0, 0) FM(DU_DR7) FM(SDA6_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
295#define IP7_15_12 FM(FSCLKST) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
296#define IP7_19_16 FM(SD0_CLK) F_(0, 0) FM(MSIOF1_SCK_E) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
297
298/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
299#define IP7_23_20 FM(SD0_CMD) F_(0, 0) FM(MSIOF1_SYNC_E) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
300#define IP7_27_24 FM(SD0_DAT0) F_(0, 0) FM(MSIOF1_RXD_E) F_(0, 0) F_(0, 0) FM(TS_SCK0_B) FM(STP_ISCLK_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
301#define IP7_31_28 FM(SD0_DAT1) F_(0, 0) FM(MSIOF1_TXD_E) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0_B)FM(STP_ISSYNC_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
302#define IP8_3_0 FM(SD0_DAT2) F_(0, 0) FM(MSIOF1_SS1_E) F_(0, 0) F_(0, 0) FM(TS_SDAT0_B) FM(STP_ISD_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
303#define IP8_7_4 FM(SD0_DAT3) F_(0, 0) FM(MSIOF1_SS2_E) F_(0, 0) F_(0, 0) FM(TS_SDEN0_B) FM(STP_ISEN_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
304#define IP8_11_8 FM(SD1_CLK) F_(0, 0) FM(MSIOF1_SCK_G) F_(0, 0) F_(0, 0) FM(SIM0_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
305#define IP8_15_12 FM(SD1_CMD) F_(0, 0) FM(MSIOF1_SYNC_G) F_(0, 0) F_(0, 0) FM(SIM0_D_A) FM(STP_IVCXO27_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
306#define IP8_19_16 FM(SD1_DAT0) FM(SD2_DAT4) FM(MSIOF1_RXD_G) F_(0, 0) F_(0, 0) FM(TS_SCK1_B) FM(STP_ISCLK_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
307#define IP8_23_20 FM(SD1_DAT1) FM(SD2_DAT5) FM(MSIOF1_TXD_G) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_B)FM(STP_ISSYNC_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
308#define IP8_27_24 FM(SD1_DAT2) FM(SD2_DAT6) FM(MSIOF1_SS1_G) F_(0, 0) F_(0, 0) FM(TS_SDAT1_B) FM(STP_ISD_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
309#define IP8_31_28 FM(SD1_DAT3) FM(SD2_DAT7) FM(MSIOF1_SS2_G) F_(0, 0) F_(0, 0) FM(TS_SDEN1_B) FM(STP_ISEN_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
310#define IP9_3_0 FM(SD2_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
311#define IP9_7_4 FM(SD2_DAT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
312#define IP9_11_8 FM(SD2_DAT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
313#define IP9_15_12 FM(SD2_DAT2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
314#define IP9_19_16 FM(SD2_DAT3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
315#define IP9_23_20 FM(SD2_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(SATA_DEVSLP_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
316#define IP9_27_24 FM(SD3_DAT4) FM(SD2_CD_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
317#define IP9_31_28 FM(SD3_DAT5) FM(SD2_WP_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
318#define IP10_3_0 FM(SD3_DAT6) FM(SD3_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
319#define IP10_7_4 FM(SD3_DAT7) FM(SD3_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
320#define IP10_11_8 FM(SD0_CD) F_(0, 0) F_(0, 0) F_(0, 0) FM(SCL2_B) FM(SIM0_RST_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
321#define IP10_15_12 FM(SD0_WP) F_(0, 0) F_(0, 0) F_(0, 0) FM(SDA2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
322#define IP10_19_16 FM(SD1_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(SIM0_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
323#define IP10_23_20 FM(SD1_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(SIM0_D_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
324#define IP10_27_24 FM(SCK0) FM(HSCK1_B) FM(MSIOF1_SS2_B) FM(AUDIO_CLKC_B) FM(SDA2_A) FM(SIM0_RST_B) FM(STP_OPWM_0_C) FM(RIF0_CLK_B) F_(0, 0) FM(ADICHS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
325#define IP10_31_28 FM(RX0) FM(HRX1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SCK0_C) FM(STP_ISCLK_0_C) FM(RIF0_D0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
326#define IP11_3_0 FM(TX0) FM(HTX1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0_C)FM(STP_ISSYNC_0_C) FM(RIF0_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
327#define IP11_7_4 FM(CTS0_N) FM(HCTS1_N_B) FM(MSIOF1_SYNC_B) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_C)FM(STP_ISSYNC_1_C) FM(RIF1_SYNC_B) FM(AUDIO_CLKOUT_C) FM(ADICS_SAMP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
328#define IP11_11_8 FM(RTS0_N_TANS) FM(HRTS1_N_B) FM(MSIOF1_SS1_B) FM(AUDIO_CLKA_B) FM(SCL2_A) F_(0, 0) FM(STP_IVCXO27_1_C) FM(RIF0_SYNC_B) F_(0, 0) FM(ADICHS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
329#define IP11_15_12 FM(RX1_A) FM(HRX1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT0_C) FM(STP_ISD_0_C) FM(RIF1_CLK_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
330#define IP11_19_16 FM(TX1_A) FM(HTX1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDEN0_C) FM(STP_ISEN_0_C) FM(RIF1_D0_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
331#define IP11_23_20 FM(CTS1_N) FM(HCTS1_N_A) FM(MSIOF1_RXD_B) F_(0, 0) F_(0, 0) FM(TS_SDEN1_C) FM(STP_ISEN_1_C) FM(RIF1_D0_B) F_(0, 0) FM(ADIDATA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
332#define IP11_27_24 FM(RTS1_N_TANS) FM(HRTS1_N_A) FM(MSIOF1_TXD_B) F_(0, 0) F_(0, 0) FM(TS_SDAT1_C) FM(STP_ISD_1_C) FM(RIF1_D1_B) F_(0, 0) FM(ADICHS0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
333#define IP11_31_28 FM(SCK2) FM(SCIF_CLK_B) FM(MSIOF1_SCK_B) F_(0, 0) F_(0, 0) FM(TS_SCK1_C) FM(STP_ISCLK_1_C) FM(RIF1_CLK_B) F_(0, 0) FM(ADICLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
334#define IP12_3_0 FM(TX2_A) F_(0, 0) F_(0, 0) FM(SD2_CD_B) FM(SCL1_A) F_(0, 0) FM(FMCLK_A) FM(RIF1_D1_C) F_(0, 0) FM(FSO_CFE_0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
335#define IP12_7_4 FM(RX2_A) F_(0, 0) F_(0, 0) FM(SD2_WP_B) FM(SDA1_A) F_(0, 0) FM(FMIN_A) FM(RIF1_SYNC_C) F_(0, 0) FM(FSO_CFE_1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
336#define IP12_11_8 FM(HSCK0) F_(0, 0) FM(MSIOF1_SCK_D) FM(AUDIO_CLKB_A) FM(SSI_SDATA1_B)FM(TS_SCK0_D) FM(STP_ISCLK_0_D) FM(RIF0_CLK_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
337#define IP12_15_12 FM(HRX0) F_(0, 0) FM(MSIOF1_RXD_D) F_(0, 0) FM(SSI_SDATA2_B)FM(TS_SDEN0_D) FM(STP_ISEN_0_D) FM(RIF0_D0_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
338#define IP12_19_16 FM(HTX0) F_(0, 0) FM(MSIOF1_TXD_D) F_(0, 0) FM(SSI_SDATA9_B)FM(TS_SDAT0_D) FM(STP_ISD_0_D) FM(RIF0_D1_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
339#define IP12_23_20 FM(HCTS0_N) FM(RX2_B) FM(MSIOF1_SYNC_D) F_(0, 0) FM(SSI_SCK9_A) FM(TS_SPSYNC0_D)FM(STP_ISSYNC_0_D) FM(RIF0_SYNC_C) FM(AUDIO_CLKOUT1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
340#define IP12_27_24 FM(HRTS0_N) FM(TX2_B) FM(MSIOF1_SS1_D) F_(0, 0) FM(SSI_WS9_A) F_(0, 0) FM(STP_IVCXO27_0_D) FM(BPFCLK_A) FM(AUDIO_CLKOUT2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
341
342/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
343#define IP12_31_28 FM(MSIOF0_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
344#define IP13_3_0 FM(MSIOF0_SS1) FM(RX5) F_(0, 0) FM(AUDIO_CLKA_C) FM(SSI_SCK2_A) F_(0, 0) FM(STP_IVCXO27_0_C) F_(0, 0) FM(AUDIO_CLKOUT3_A) F_(0, 0) FM(TCLK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
345#define IP13_7_4 FM(MSIOF0_SS2) FM(TX5) FM(MSIOF1_SS2_D) FM(AUDIO_CLKC_A) FM(SSI_WS2_A) F_(0, 0) FM(STP_OPWM_0_D) F_(0, 0) FM(AUDIO_CLKOUT_D) F_(0, 0) FM(SPEEDIN_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
346#define IP13_11_8 FM(MLB_CLK) F_(0, 0) FM(MSIOF1_SCK_F) F_(0, 0) FM(SCL1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
347#define IP13_15_12 FM(MLB_SIG) FM(RX1_B) FM(MSIOF1_SYNC_F) F_(0, 0) FM(SDA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
348#define IP13_19_16 FM(MLB_DAT) FM(TX1_B) FM(MSIOF1_RXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
349#define IP13_23_20 FM(SSI_SCK0129) F_(0, 0) FM(MSIOF1_TXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
350#define IP13_27_24 FM(SSI_WS0129) F_(0, 0) FM(MSIOF1_SS1_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
351#define IP13_31_28 FM(SSI_SDATA0) F_(0, 0) FM(MSIOF1_SS2_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
352#define IP14_3_0 FM(SSI_SDATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
353#define IP14_7_4 FM(SSI_SDATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(SSI_SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
354#define IP14_11_8 FM(SSI_SCK34) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
355#define IP14_15_12 FM(SSI_WS34) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
356#define IP14_19_16 FM(SSI_SDATA3) FM(HRTS2_N_A) FM(MSIOF1_TXD_A) F_(0, 0) F_(0, 0) FM(TS_SCK0_A) FM(STP_ISCLK_0_A) FM(RIF0_D1_A) FM(RIF2_D0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
357#define IP14_23_20 FM(SSI_SCK4) FM(HRX2_A) FM(MSIOF1_SCK_A) F_(0, 0) F_(0, 0) FM(TS_SDAT0_A) FM(STP_ISD_0_A) FM(RIF0_CLK_A) FM(RIF2_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
358#define IP14_27_24 FM(SSI_WS4) FM(HTX2_A) FM(MSIOF1_SYNC_A) F_(0, 0) F_(0, 0) FM(TS_SDEN0_A) FM(STP_ISEN_0_A) FM(RIF0_SYNC_A) FM(RIF2_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
359#define IP14_31_28 FM(SSI_SDATA4) FM(HSCK2_A) FM(MSIOF1_RXD_A) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0_A)FM(STP_ISSYNC_0_A) FM(RIF0_D0_A) FM(RIF2_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
360#define IP15_3_0 FM(SSI_SCK6) FM(USB2_PWEN) F_(0, 0) FM(SIM0_RST_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
361#define IP15_7_4 FM(SSI_WS6) FM(USB2_OVC) F_(0, 0) FM(SIM0_D_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
362#define IP15_11_8 FM(SSI_SDATA6) F_(0, 0) F_(0, 0) FM(SIM0_CLK_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(SATA_DEVSLP_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
363#define IP15_15_12 FM(SSI_SCK78) FM(HRX2_B) FM(MSIOF1_SCK_C) F_(0, 0) F_(0, 0) FM(TS_SCK1_A) FM(STP_ISCLK_1_A) FM(RIF1_CLK_A) FM(RIF3_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
364#define IP15_19_16 FM(SSI_WS78) FM(HTX2_B) FM(MSIOF1_SYNC_C) F_(0, 0) F_(0, 0) FM(TS_SDAT1_A) FM(STP_ISD_1_A) FM(RIF1_SYNC_A) FM(RIF3_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
365#define IP15_23_20 FM(SSI_SDATA7) FM(HCTS2_N_B) FM(MSIOF1_RXD_C) F_(0, 0) F_(0, 0) FM(TS_SDEN1_A) FM(STP_ISEN_1_A) FM(RIF1_D0_A) FM(RIF3_D0_A) F_(0, 0) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
366#define IP15_27_24 FM(SSI_SDATA8) FM(HRTS2_N_B) FM(MSIOF1_TXD_C) F_(0, 0) F_(0, 0) FM(TS_SPSYNC1_A)FM(STP_ISSYNC_1_A) FM(RIF1_D1_A) FM(RIF3_D1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
367#define IP15_31_28 FM(SSI_SDATA9_A) FM(HSCK2_B) FM(MSIOF1_SS1_C) FM(HSCK1_A) FM(SSI_WS1_B) FM(SCK1) FM(STP_IVCXO27_1_A) FM(SCK5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
368#define IP16_3_0 FM(AUDIO_CLKA_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
369#define IP16_7_4 FM(AUDIO_CLKB_B) FM(SCIF_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_1_D) FM(REMOCON_A) F_(0, 0) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
370#define IP16_11_8 FM(USB0_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_RST_C) F_(0, 0) FM(TS_SCK1_D) FM(STP_ISCLK_1_D) FM(BPFCLK_B) FM(RIF3_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
371#define IP16_15_12 FM(USB0_OVC) F_(0, 0) F_(0, 0) FM(SIM0_D_C) F_(0, 0) FM(TS_SDAT1_D) FM(STP_ISD_1_D) F_(0, 0) FM(RIF3_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
372#define IP16_19_16 FM(USB1_PWEN) F_(0, 0) F_(0, 0) FM(SIM0_CLK_C) FM(SSI_SCK1_A) FM(TS_SCK0_E) FM(STP_ISCLK_0_E) FM(FMCLK_B) FM(RIF2_CLK_B) F_(0, 0) FM(SPEEDIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
373#define IP16_23_20 FM(USB1_OVC) F_(0, 0) FM(MSIOF1_SS2_C) F_(0, 0) FM(SSI_WS1_A) FM(TS_SDAT0_E) FM(STP_ISD_0_E) FM(FMIN_B) FM(RIF2_SYNC_B) F_(0, 0) FM(REMOCON_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
374#define IP16_27_24 FM(USB30_PWEN) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT_B) FM(SSI_SCK2_B) FM(TS_SDEN1_D) FM(STP_ISEN_1_D) FM(STP_OPWM_0_E)FM(RIF3_D0_B) F_(0, 0) FM(TCLK2_B) FM(TPU0TO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
375#define IP16_31_28 FM(USB30_OVC) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT1_B) FM(SSI_WS2_B) FM(TS_SPSYNC1_D)FM(STP_ISSYNC_1_D) FM(STP_IVCXO27_0_E)FM(RIF3_D1_B) F_(0, 0) FM(FSO_TOE_B) FM(TPU0TO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
376#define IP17_3_0 FM(USB31_PWEN) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT2_B) FM(SSI_SCK9_B) FM(TS_SDEN0_E) FM(STP_ISEN_0_E) F_(0, 0) FM(RIF2_D0_B) F_(0, 0) F_(0, 0) FM(TPU0TO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
377#define IP17_7_4 FM(USB31_OVC) F_(0, 0) F_(0, 0) FM(AUDIO_CLKOUT3_B) FM(SSI_WS9_B) FM(TS_SPSYNC0_E)FM(STP_ISSYNC_0_E) F_(0, 0) FM(RIF2_D1_B) F_(0, 0) F_(0, 0) FM(TPU0TO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
378
379#define PINMUX_GPSR \
380\
381 GPSR6_31 \
382 GPSR6_30 \
383 GPSR6_29 \
384 GPSR6_28 \
385 GPSR1_27 GPSR6_27 \
386 GPSR1_26 GPSR6_26 \
387 GPSR1_25 GPSR5_25 GPSR6_25 \
388 GPSR1_24 GPSR5_24 GPSR6_24 \
389 GPSR1_23 GPSR5_23 GPSR6_23 \
390 GPSR1_22 GPSR5_22 GPSR6_22 \
391 GPSR1_21 GPSR5_21 GPSR6_21 \
392 GPSR1_20 GPSR5_20 GPSR6_20 \
393 GPSR1_19 GPSR5_19 GPSR6_19 \
394 GPSR1_18 GPSR5_18 GPSR6_18 \
395 GPSR1_17 GPSR4_17 GPSR5_17 GPSR6_17 \
396 GPSR1_16 GPSR4_16 GPSR5_16 GPSR6_16 \
397GPSR0_15 GPSR1_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 \
398GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 \
399GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 \
400GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 \
401GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 \
402GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 \
403GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 \
404GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 \
405GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 \
406GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 \
407GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 \
408GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 \
409GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 \
410GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 \
411GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 \
412GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0
413
414#define PINMUX_IPSR \
415\
416FM(IP0_3_0) IP0_3_0 FM(IP1_3_0) IP1_3_0 FM(IP2_3_0) IP2_3_0 FM(IP3_3_0) IP3_3_0 \
417FM(IP0_7_4) IP0_7_4 FM(IP1_7_4) IP1_7_4 FM(IP2_7_4) IP2_7_4 FM(IP3_7_4) IP3_7_4 \
418FM(IP0_11_8) IP0_11_8 FM(IP1_11_8) IP1_11_8 FM(IP2_11_8) IP2_11_8 FM(IP3_11_8) IP3_11_8 \
419FM(IP0_15_12) IP0_15_12 FM(IP1_15_12) IP1_15_12 FM(IP2_15_12) IP2_15_12 FM(IP3_15_12) IP3_15_12 \
420FM(IP0_19_16) IP0_19_16 FM(IP1_19_16) IP1_19_16 FM(IP2_19_16) IP2_19_16 FM(IP3_19_16) IP3_19_16 \
421FM(IP0_23_20) IP0_23_20 FM(IP1_23_20) IP1_23_20 FM(IP2_23_20) IP2_23_20 FM(IP3_23_20) IP3_23_20 \
422FM(IP0_27_24) IP0_27_24 FM(IP1_27_24) IP1_27_24 FM(IP2_27_24) IP2_27_24 FM(IP3_27_24) IP3_27_24 \
423FM(IP0_31_28) IP0_31_28 FM(IP1_31_28) IP1_31_28 FM(IP2_31_28) IP2_31_28 FM(IP3_31_28) IP3_31_28 \
424\
425FM(IP4_3_0) IP4_3_0 FM(IP5_3_0) IP5_3_0 FM(IP6_3_0) IP6_3_0 FM(IP7_3_0) IP7_3_0 \
426FM(IP4_7_4) IP4_7_4 FM(IP5_7_4) IP5_7_4 FM(IP6_7_4) IP6_7_4 FM(IP7_7_4) IP7_7_4 \
427FM(IP4_11_8) IP4_11_8 FM(IP5_11_8) IP5_11_8 FM(IP6_11_8) IP6_11_8 FM(IP7_11_8) IP7_11_8 \
428FM(IP4_15_12) IP4_15_12 FM(IP5_15_12) IP5_15_12 FM(IP6_15_12) IP6_15_12 FM(IP7_15_12) IP7_15_12 \
429FM(IP4_19_16) IP4_19_16 FM(IP5_19_16) IP5_19_16 FM(IP6_19_16) IP6_19_16 FM(IP7_19_16) IP7_19_16 \
430FM(IP4_23_20) IP4_23_20 FM(IP5_23_20) IP5_23_20 FM(IP6_23_20) IP6_23_20 FM(IP7_23_20) IP7_23_20 \
431FM(IP4_27_24) IP4_27_24 FM(IP5_27_24) IP5_27_24 FM(IP6_27_24) IP6_27_24 FM(IP7_27_24) IP7_27_24 \
432FM(IP4_31_28) IP4_31_28 FM(IP5_31_28) IP5_31_28 FM(IP6_31_28) IP6_31_28 FM(IP7_31_28) IP7_31_28 \
433\
434FM(IP8_3_0) IP8_3_0 FM(IP9_3_0) IP9_3_0 FM(IP10_3_0) IP10_3_0 FM(IP11_3_0) IP11_3_0 \
435FM(IP8_7_4) IP8_7_4 FM(IP9_7_4) IP9_7_4 FM(IP10_7_4) IP10_7_4 FM(IP11_7_4) IP11_7_4 \
436FM(IP8_11_8) IP8_11_8 FM(IP9_11_8) IP9_11_8 FM(IP10_11_8) IP10_11_8 FM(IP11_11_8) IP11_11_8 \
437FM(IP8_15_12) IP8_15_12 FM(IP9_15_12) IP9_15_12 FM(IP10_15_12) IP10_15_12 FM(IP11_15_12) IP11_15_12 \
438FM(IP8_19_16) IP8_19_16 FM(IP9_19_16) IP9_19_16 FM(IP10_19_16) IP10_19_16 FM(IP11_19_16) IP11_19_16 \
439FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 FM(IP10_23_20) IP10_23_20 FM(IP11_23_20) IP11_23_20 \
440FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 FM(IP10_27_24) IP10_27_24 FM(IP11_27_24) IP11_27_24 \
441FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28 FM(IP10_31_28) IP10_31_28 FM(IP11_31_28) IP11_31_28 \
442\
443FM(IP12_3_0) IP12_3_0 FM(IP13_3_0) IP13_3_0 FM(IP14_3_0) IP14_3_0 FM(IP15_3_0) IP15_3_0 \
444FM(IP12_7_4) IP12_7_4 FM(IP13_7_4) IP13_7_4 FM(IP14_7_4) IP14_7_4 FM(IP15_7_4) IP15_7_4 \
445FM(IP12_11_8) IP12_11_8 FM(IP13_11_8) IP13_11_8 FM(IP14_11_8) IP14_11_8 FM(IP15_11_8) IP15_11_8 \
446FM(IP12_15_12) IP12_15_12 FM(IP13_15_12) IP13_15_12 FM(IP14_15_12) IP14_15_12 FM(IP15_15_12) IP15_15_12 \
447FM(IP12_19_16) IP12_19_16 FM(IP13_19_16) IP13_19_16 FM(IP14_19_16) IP14_19_16 FM(IP15_19_16) IP15_19_16 \
448FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM(IP15_23_20) IP15_23_20 \
449FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \
450FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28 \
451\
452FM(IP16_3_0) IP16_3_0 FM(IP17_3_0) IP17_3_0 \
453FM(IP16_7_4) IP16_7_4 FM(IP17_7_4) IP17_7_4 \
454FM(IP16_11_8) IP16_11_8 \
455FM(IP16_15_12) IP16_15_12 \
456FM(IP16_19_16) IP16_19_16 \
457FM(IP16_23_20) IP16_23_20 \
458FM(IP16_27_24) IP16_27_24 \
459FM(IP16_31_28) IP16_31_28
460
461/* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
462#define MOD_SEL0_30_29 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1) FM(SEL_MSIOF3_2) FM(SEL_MSIOF3_3)
463#define MOD_SEL0_28_27 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1) FM(SEL_MSIOF2_2) FM(SEL_MSIOF2_3)
464#define MOD_SEL0_26_25_24 FM(SEL_MSIOF1_0) FM(SEL_MSIOF1_1) FM(SEL_MSIOF1_2) FM(SEL_MSIOF1_3) FM(SEL_MSIOF1_4) FM(SEL_MSIOF1_5) FM(SEL_MSIOF1_6) F_(0, 0)
465#define MOD_SEL0_23 FM(SEL_LBSC_0) FM(SEL_LBSC_1)
466#define MOD_SEL0_22 FM(SEL_IEBUS_0) FM(SEL_IEBUS_1)
467#define MOD_SEL0_21_20 FM(SEL_I2C6_0) FM(SEL_I2C6_1) FM(SEL_I2C6_2) F_(0, 0)
468#define MOD_SEL0_19 FM(SEL_I2C2_0) FM(SEL_I2C2_1)
469#define MOD_SEL0_18 FM(SEL_I2C1_0) FM(SEL_I2C1_1)
470#define MOD_SEL0_17 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1)
471#define MOD_SEL0_16_15 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3)
472#define MOD_SEL0_14 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1)
473#define MOD_SEL0_13 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1)
474#define MOD_SEL0_12 FM(SEL_FSO_0) FM(SEL_FSO_1)
475#define MOD_SEL0_11 FM(SEL_FM_0) FM(SEL_FM_1)
476#define MOD_SEL0_10 FM(SEL_ETHERAVB_0) FM(SEL_ETHERAVB_1)
477#define MOD_SEL0_9 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1)
478#define MOD_SEL0_8 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
479#define MOD_SEL0_7_6 FM(SEL_DRIF1_0) FM(SEL_DRIF1_1) FM(SEL_DRIF1_2) F_(0, 0)
480#define MOD_SEL0_5_4 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1) FM(SEL_DRIF0_2) F_(0, 0)
481#define MOD_SEL0_3 FM(SEL_CANFD0_0) FM(SEL_CANFD0_1)
482#define MOD_SEL0_2_1 FM(SEL_ADG_0) FM(SEL_ADG_1) FM(SEL_ADG_2) FM(SEL_ADG_3)
483
484/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
485#define MOD_SEL1_31_30 FM(SEL_TSIF1_0) FM(SEL_TSIF1_1) FM(SEL_TSIF1_2) FM(SEL_TSIF1_3)
486#define MOD_SEL1_29_28_27 FM(SEL_TSIF0_0) FM(SEL_TSIF0_1) FM(SEL_TSIF0_2) FM(SEL_TSIF0_3) FM(SEL_TSIF0_4) F_(0, 0) F_(0, 0) F_(0, 0)
487#define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
488#define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3)
489#define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0)
490#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1)
491#define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1)
492#define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3)
493#define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1)
494#define MOD_SEL1_15_14 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0)
495#define MOD_SEL1_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1)
496#define MOD_SEL1_12 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1)
497#define MOD_SEL1_11 FM(SEL_SCIF1_0) FM(SEL_SCIF1_1)
498#define MOD_SEL1_10 FM(SEL_SCIF_0) FM(SEL_SCIF_1)
499#define MOD_SEL1_9 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1)
500#define MOD_SEL1_6 FM(SEL_RCAN0_0) FM(SEL_RCAN0_1)
501#define MOD_SEL1_5 FM(SEL_PWM6_0) FM(SEL_PWM6_1)
502#define MOD_SEL1_4 FM(SEL_PWM5_0) FM(SEL_PWM5_1)
503#define MOD_SEL1_3 FM(SEL_PWM4_0) FM(SEL_PWM4_1)
504#define MOD_SEL1_2 FM(SEL_PWM3_0) FM(SEL_PWM3_1)
505#define MOD_SEL1_1 FM(SEL_PWM2_0) FM(SEL_PWM2_1)
506#define MOD_SEL1_0 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
507
508/* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */
509#define MOD_SEL2_31 FM(I2C_SEL_5_0) FM(I2C_SEL_5_1)
510#define MOD_SEL2_30 FM(I2C_SEL_3_0) FM(I2C_SEL_3_1)
511#define MOD_SEL2_29 FM(I2C_SEL_0_0) FM(I2C_SEL_0_1)
512#define MOD_SEL2_2_1 FM(SEL_VSP_0) FM(SEL_VSP_1) FM(SEL_VSP_2) FM(SEL_VSP_3)
513#define MOD_SEL2_0 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
514
515#define PINMUX_MOD_SELS\
516\
517 MOD_SEL1_31_30 MOD_SEL2_31 \
518MOD_SEL0_30_29 MOD_SEL2_30 \
519 MOD_SEL1_29_28_27 MOD_SEL2_29 \
520MOD_SEL0_28_27 \
521\
522MOD_SEL0_26_25_24 MOD_SEL1_26 \
523 MOD_SEL1_25_24 \
524\
525MOD_SEL0_23 MOD_SEL1_23_22_21 \
526MOD_SEL0_22 \
527MOD_SEL0_21_20 \
528 MOD_SEL1_20 \
529MOD_SEL0_19 MOD_SEL1_19 \
530MOD_SEL0_18 MOD_SEL1_18_17 \
531MOD_SEL0_17 \
532MOD_SEL0_16_15 MOD_SEL1_16 \
533 MOD_SEL1_15_14 \
534MOD_SEL0_14 \
535MOD_SEL0_13 MOD_SEL1_13 \
536MOD_SEL0_12 MOD_SEL1_12 \
537MOD_SEL0_11 MOD_SEL1_11 \
538MOD_SEL0_10 MOD_SEL1_10 \
539MOD_SEL0_9 MOD_SEL1_9 \
540MOD_SEL0_8 \
541MOD_SEL0_7_6 \
542 MOD_SEL1_6 \
543MOD_SEL0_5_4 MOD_SEL1_5 \
544 MOD_SEL1_4 \
545MOD_SEL0_3 MOD_SEL1_3 \
546MOD_SEL0_2_1 MOD_SEL1_2 MOD_SEL2_2_1 \
547 MOD_SEL1_1 \
548 MOD_SEL1_0 MOD_SEL2_0
549
550
551enum {
552 PINMUX_RESERVED = 0,
553
554 PINMUX_DATA_BEGIN,
555 GP_ALL(DATA),
556 PINMUX_DATA_END,
557
558#define F_(x, y)
559#define FM(x) FN_##x,
560 PINMUX_FUNCTION_BEGIN,
561 GP_ALL(FN),
562 PINMUX_GPSR
563 PINMUX_IPSR
564 PINMUX_MOD_SELS
565 PINMUX_FUNCTION_END,
566#undef F_
567#undef FM
568
569#define F_(x, y)
570#define FM(x) x##_MARK,
571 PINMUX_MARK_BEGIN,
572 PINMUX_GPSR
573 PINMUX_IPSR
574 PINMUX_MOD_SELS
575 PINMUX_MARK_END,
576#undef F_
577#undef FM
578};
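/*
 * Illustrative sketch, not part of pfc-r8a7795.c: the enum above uses the
 * "X-macro" idiom described by the F_()/FM() comment earlier in the file.
 * The same PINMUX_GPSR/PINMUX_IPSR/PINMUX_MOD_SELS lists are expanded twice
 * with different definitions of FM(), first to emit FN_* enumerators and
 * then *_MARK enumerators.  The list and names below (DEMO_LIST, FOO, BAR)
 * are invented for illustration only.
 */
#define DEMO_LIST \
	FM(FOO) \
	FM(BAR)

enum {
#define FM(x) FN_##x,
	DEMO_LIST		/* expands to: FN_FOO, FN_BAR, */
#undef FM
#define FM(x) x##_MARK,
	DEMO_LIST		/* expands to: FOO_MARK, BAR_MARK, */
#undef FM
};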
579
580static const u16 pinmux_data[] = {
581 PINMUX_DATA_GP_ALL(),
582
583 /* IPSR0 */
584 PINMUX_IPSR_DATA(IP0_3_0, AVB_MDC),
585 PINMUX_IPSR_MSEL(IP0_3_0, MSIOF2_SS2_C, SEL_MSIOF2_2),
586
587 PINMUX_IPSR_DATA(IP0_7_4, AVB_MAGIC),
588 PINMUX_IPSR_MSEL(IP0_7_4, MSIOF2_SS1_C, SEL_MSIOF2_2),
589 PINMUX_IPSR_MSEL(IP0_7_4, SCK4_A, SEL_SCIF4_0),
590
591 PINMUX_IPSR_DATA(IP0_11_8, AVB_PHY_INT),
592 PINMUX_IPSR_MSEL(IP0_11_8, MSIOF2_SYNC_C, SEL_MSIOF2_2),
593 PINMUX_IPSR_MSEL(IP0_11_8, RX4_A, SEL_SCIF4_0),
594
595 PINMUX_IPSR_DATA(IP0_15_12, AVB_LINK),
596 PINMUX_IPSR_MSEL(IP0_15_12, MSIOF2_SCK_C, SEL_MSIOF2_2),
597 PINMUX_IPSR_MSEL(IP0_15_12, TX4_A, SEL_SCIF4_0),
598
599 PINMUX_IPSR_MSEL(IP0_19_16, AVB_AVTP_MATCH_A, SEL_ETHERAVB_0),
600 PINMUX_IPSR_MSEL(IP0_19_16, MSIOF2_RXD_C, SEL_MSIOF2_2),
601 PINMUX_IPSR_MSEL(IP0_19_16, CTS4_N_A, SEL_SCIF4_0),
602
603 PINMUX_IPSR_MSEL(IP0_23_20, AVB_AVTP_CAPTURE_A, SEL_ETHERAVB_0),
604 PINMUX_IPSR_MSEL(IP0_23_20, MSIOF2_TXD_C, SEL_MSIOF2_2),
605 PINMUX_IPSR_MSEL(IP0_23_20, RTS4_N_TANS_A, SEL_SCIF4_0),
606
607 PINMUX_IPSR_DATA(IP0_27_24, IRQ0),
608 PINMUX_IPSR_DATA(IP0_27_24, QPOLB),
609 PINMUX_IPSR_DATA(IP0_27_24, DU_CDE),
610 PINMUX_IPSR_MSEL(IP0_27_24, VI4_DATA0_B, SEL_VIN4_1),
611 PINMUX_IPSR_MSEL(IP0_27_24, CAN0_TX_B, SEL_RCAN0_1),
612 PINMUX_IPSR_MSEL(IP0_27_24, CANFD0_TX_B, SEL_CANFD0_1),
613
614 PINMUX_IPSR_DATA(IP0_31_28, IRQ1),
615 PINMUX_IPSR_DATA(IP0_31_28, QPOLA),
616 PINMUX_IPSR_DATA(IP0_31_28, DU_DISP),
617 PINMUX_IPSR_MSEL(IP0_31_28, VI4_DATA1_B, SEL_VIN4_1),
618 PINMUX_IPSR_MSEL(IP0_31_28, CAN0_RX_B, SEL_RCAN0_1),
619 PINMUX_IPSR_MSEL(IP0_31_28, CANFD0_RX_B, SEL_CANFD0_1),
620
621 /* IPSR1 */
622 PINMUX_IPSR_DATA(IP1_3_0, IRQ2),
623 PINMUX_IPSR_DATA(IP1_3_0, QCPV_QDE),
624 PINMUX_IPSR_DATA(IP1_3_0, DU_EXODDF_DU_ODDF_DISP_CDE),
625 PINMUX_IPSR_MSEL(IP1_3_0, VI4_DATA2_B, SEL_VIN4_1),
626 PINMUX_IPSR_MSEL(IP1_3_0, PWM3_B, SEL_PWM3_1),
627
628 PINMUX_IPSR_DATA(IP1_7_4, IRQ3),
629 PINMUX_IPSR_DATA(IP1_7_4, QSTVB_QVE),
630 PINMUX_IPSR_DATA(IP1_7_4, A25),
631 PINMUX_IPSR_DATA(IP1_7_4, DU_DOTCLKOUT1),
632 PINMUX_IPSR_MSEL(IP1_7_4, VI4_DATA3_B, SEL_VIN4_1),
633 PINMUX_IPSR_MSEL(IP1_7_4, PWM4_B, SEL_PWM4_1),
634
635 PINMUX_IPSR_DATA(IP1_11_8, IRQ4),
636 PINMUX_IPSR_DATA(IP1_11_8, QSTH_QHS),
637 PINMUX_IPSR_DATA(IP1_11_8, A24),
638 PINMUX_IPSR_DATA(IP1_11_8, DU_EXHSYNC_DU_HSYNC),
639 PINMUX_IPSR_MSEL(IP1_11_8, VI4_DATA4_B, SEL_VIN4_1),
640 PINMUX_IPSR_MSEL(IP1_11_8, PWM5_B, SEL_PWM5_1),
641
642 PINMUX_IPSR_DATA(IP1_15_12, IRQ5),
643 PINMUX_IPSR_DATA(IP1_15_12, QSTB_QHE),
644 PINMUX_IPSR_DATA(IP1_15_12, A23),
645 PINMUX_IPSR_DATA(IP1_15_12, DU_EXVSYNC_DU_VSYNC),
646 PINMUX_IPSR_MSEL(IP1_15_12, VI4_DATA5_B, SEL_VIN4_1),
647 PINMUX_IPSR_MSEL(IP1_15_12, PWM6_B, SEL_PWM6_1),
648
649 PINMUX_IPSR_DATA(IP1_19_16, PWM0),
650 PINMUX_IPSR_DATA(IP1_19_16, AVB_AVTP_PPS),
651 PINMUX_IPSR_DATA(IP1_19_16, A22),
652 PINMUX_IPSR_MSEL(IP1_19_16, VI4_DATA6_B, SEL_VIN4_1),
653 PINMUX_IPSR_MSEL(IP1_19_16, IECLK_B, SEL_IEBUS_1),
654
655 PINMUX_IPSR_MSEL(IP1_23_20, PWM1_A, SEL_PWM1_0),
656 PINMUX_IPSR_DATA(IP1_23_20, A21),
657 PINMUX_IPSR_MSEL(IP1_23_20, HRX3_D, SEL_HSCIF3_3),
658 PINMUX_IPSR_MSEL(IP1_23_20, VI4_DATA7_B, SEL_VIN4_1),
659 PINMUX_IPSR_MSEL(IP1_23_20, IERX_B, SEL_IEBUS_1),
660
661 PINMUX_IPSR_MSEL(IP1_27_24, PWM2_A, SEL_PWM2_0),
662 PINMUX_IPSR_DATA(IP1_27_24, A20),
663 PINMUX_IPSR_MSEL(IP1_27_24, HTX3_D, SEL_HSCIF3_3),
664 PINMUX_IPSR_MSEL(IP1_27_24, IETX_B, SEL_IEBUS_1),
665
666 PINMUX_IPSR_DATA(IP1_31_28, A0),
667 PINMUX_IPSR_DATA(IP1_31_28, LCDOUT16),
668 PINMUX_IPSR_MSEL(IP1_31_28, MSIOF3_SYNC_B, SEL_MSIOF3_1),
669 PINMUX_IPSR_DATA(IP1_31_28, VI4_DATA8),
670 PINMUX_IPSR_DATA(IP1_31_28, DU_DB0),
671 PINMUX_IPSR_MSEL(IP1_31_28, PWM3_A, SEL_PWM3_0),
672
673 /* IPSR2 */
674 PINMUX_IPSR_DATA(IP2_3_0, A1),
675 PINMUX_IPSR_DATA(IP2_3_0, LCDOUT17),
676 PINMUX_IPSR_MSEL(IP2_3_0, MSIOF3_TXD_B, SEL_MSIOF3_1),
677 PINMUX_IPSR_DATA(IP2_3_0, VI4_DATA9),
678 PINMUX_IPSR_DATA(IP2_3_0, DU_DB1),
679 PINMUX_IPSR_MSEL(IP2_3_0, PWM4_A, SEL_PWM4_0),
680
681 PINMUX_IPSR_DATA(IP2_7_4, A2),
682 PINMUX_IPSR_DATA(IP2_7_4, LCDOUT18),
683 PINMUX_IPSR_MSEL(IP2_7_4, MSIOF3_SCK_B, SEL_MSIOF3_1),
684 PINMUX_IPSR_DATA(IP2_7_4, VI4_DATA10),
685 PINMUX_IPSR_DATA(IP2_7_4, DU_DB2),
686 PINMUX_IPSR_MSEL(IP2_7_4, PWM5_A, SEL_PWM5_0),
687
688 PINMUX_IPSR_DATA(IP2_11_8, A3),
689 PINMUX_IPSR_DATA(IP2_11_8, LCDOUT19),
690 PINMUX_IPSR_MSEL(IP2_11_8, MSIOF3_RXD_B, SEL_MSIOF3_1),
691 PINMUX_IPSR_DATA(IP2_11_8, VI4_DATA11),
692 PINMUX_IPSR_DATA(IP2_11_8, DU_DB3),
693 PINMUX_IPSR_MSEL(IP2_11_8, PWM6_A, SEL_PWM6_0),
694
695 PINMUX_IPSR_DATA(IP2_15_12, A4),
696 PINMUX_IPSR_DATA(IP2_15_12, LCDOUT20),
697 PINMUX_IPSR_MSEL(IP2_15_12, MSIOF3_SS1_B, SEL_MSIOF3_1),
698 PINMUX_IPSR_DATA(IP2_15_12, VI4_DATA12),
699 PINMUX_IPSR_DATA(IP2_15_12, VI5_DATA12),
700 PINMUX_IPSR_DATA(IP2_15_12, DU_DB4),
701
702 PINMUX_IPSR_DATA(IP2_19_16, A5),
703 PINMUX_IPSR_DATA(IP2_19_16, LCDOUT21),
704 PINMUX_IPSR_MSEL(IP2_19_16, MSIOF3_SS2_B, SEL_MSIOF3_1),
705 PINMUX_IPSR_MSEL(IP2_19_16, SCK4_B, SEL_SCIF4_1),
706 PINMUX_IPSR_DATA(IP2_19_16, VI4_DATA13),
707 PINMUX_IPSR_DATA(IP2_19_16, VI5_DATA13),
708 PINMUX_IPSR_DATA(IP2_19_16, DU_DB5),
709
710 PINMUX_IPSR_DATA(IP2_23_20, A6),
711 PINMUX_IPSR_DATA(IP2_23_20, LCDOUT22),
712 PINMUX_IPSR_MSEL(IP2_23_20, MSIOF2_SS1_A, SEL_MSIOF2_0),
713 PINMUX_IPSR_MSEL(IP2_23_20, RX4_B, SEL_SCIF4_1),
714 PINMUX_IPSR_DATA(IP2_23_20, VI4_DATA14),
715 PINMUX_IPSR_DATA(IP2_23_20, VI5_DATA14),
716 PINMUX_IPSR_DATA(IP2_23_20, DU_DB6),
717
718 PINMUX_IPSR_DATA(IP2_27_24, A7),
719 PINMUX_IPSR_DATA(IP2_27_24, LCDOUT23),
720 PINMUX_IPSR_MSEL(IP2_27_24, MSIOF2_SS2_A, SEL_MSIOF2_0),
721 PINMUX_IPSR_MSEL(IP2_27_24, TX4_B, SEL_SCIF4_1),
722 PINMUX_IPSR_DATA(IP2_27_24, VI4_DATA15),
723 PINMUX_IPSR_DATA(IP2_27_24, VI5_DATA15),
724 PINMUX_IPSR_DATA(IP2_27_24, DU_DB7),
725
726 PINMUX_IPSR_DATA(IP2_31_28, A8),
727 PINMUX_IPSR_MSEL(IP2_31_28, RX3_B, SEL_SCIF3_1),
728 PINMUX_IPSR_MSEL(IP2_31_28, MSIOF2_SYNC_A, SEL_MSIOF2_0),
729 PINMUX_IPSR_MSEL(IP2_31_28, HRX4_B, SEL_HSCIF4_1),
730 PINMUX_IPSR_MSEL(IP2_31_28, SDA6_A, SEL_I2C6_0),
731 PINMUX_IPSR_MSEL(IP2_31_28, AVB_AVTP_MATCH_B, SEL_ETHERAVB_1),
732 PINMUX_IPSR_MSEL(IP2_31_28, PWM1_B, SEL_PWM1_1),
733
734 /* IPSR3 */
735 PINMUX_IPSR_DATA(IP3_3_0, A9),
736 PINMUX_IPSR_MSEL(IP3_3_0, MSIOF2_SCK_A, SEL_MSIOF2_0),
737 PINMUX_IPSR_MSEL(IP3_3_0, CTS4_N_B, SEL_SCIF4_1),
738 PINMUX_IPSR_DATA(IP3_3_0, VI5_VSYNC_N),
739
740 PINMUX_IPSR_DATA(IP3_7_4, A10),
741 PINMUX_IPSR_MSEL(IP3_7_4, MSIOF2_RXD_A, SEL_MSIOF2_0),
742 PINMUX_IPSR_MSEL(IP3_7_4, RTS4_N_TANS_B, SEL_SCIF4_1),
743 PINMUX_IPSR_DATA(IP3_7_4, VI5_HSYNC_N),
744
745 PINMUX_IPSR_DATA(IP3_11_8, A11),
746 PINMUX_IPSR_MSEL(IP3_11_8, TX3_B, SEL_SCIF3_1),
747 PINMUX_IPSR_MSEL(IP3_11_8, MSIOF2_TXD_A, SEL_MSIOF2_0),
748 PINMUX_IPSR_MSEL(IP3_11_8, HTX4_B, SEL_HSCIF4_1),
749 PINMUX_IPSR_DATA(IP3_11_8, HSCK4),
750 PINMUX_IPSR_DATA(IP3_11_8, VI5_FIELD),
751 PINMUX_IPSR_MSEL(IP3_11_8, SCL6_A, SEL_I2C6_0),
752 PINMUX_IPSR_MSEL(IP3_11_8, AVB_AVTP_CAPTURE_B, SEL_ETHERAVB_1),
753 PINMUX_IPSR_MSEL(IP3_11_8, PWM2_B, SEL_PWM2_1),
754
755 PINMUX_IPSR_DATA(IP3_15_12, A12),
756 PINMUX_IPSR_DATA(IP3_15_12, LCDOUT12),
757 PINMUX_IPSR_MSEL(IP3_15_12, MSIOF3_SCK_C, SEL_MSIOF3_2),
758 PINMUX_IPSR_MSEL(IP3_15_12, HRX4_A, SEL_HSCIF4_0),
759 PINMUX_IPSR_DATA(IP3_15_12, VI5_DATA8),
760 PINMUX_IPSR_DATA(IP3_15_12, DU_DG4),
761
762 PINMUX_IPSR_DATA(IP3_19_16, A13),
763 PINMUX_IPSR_DATA(IP3_19_16, LCDOUT13),
764 PINMUX_IPSR_MSEL(IP3_19_16, MSIOF3_SYNC_C, SEL_MSIOF3_2),
765 PINMUX_IPSR_MSEL(IP3_19_16, HTX4_A, SEL_HSCIF4_0),
766 PINMUX_IPSR_DATA(IP3_19_16, VI5_DATA9),
767 PINMUX_IPSR_DATA(IP3_19_16, DU_DG5),
768
769 PINMUX_IPSR_DATA(IP3_23_20, A14),
770 PINMUX_IPSR_DATA(IP3_23_20, LCDOUT14),
771 PINMUX_IPSR_MSEL(IP3_23_20, MSIOF3_RXD_C, SEL_MSIOF3_2),
772 PINMUX_IPSR_DATA(IP3_23_20, HCTS4_N),
773 PINMUX_IPSR_DATA(IP3_23_20, VI5_DATA10),
774 PINMUX_IPSR_DATA(IP3_23_20, DU_DG6),
775
776 PINMUX_IPSR_DATA(IP3_27_24, A15),
777 PINMUX_IPSR_DATA(IP3_27_24, LCDOUT15),
778 PINMUX_IPSR_MSEL(IP3_27_24, MSIOF3_TXD_C, SEL_MSIOF3_2),
779 PINMUX_IPSR_DATA(IP3_27_24, HRTS4_N),
780 PINMUX_IPSR_DATA(IP3_27_24, VI5_DATA11),
781 PINMUX_IPSR_DATA(IP3_27_24, DU_DG7),
782
783 PINMUX_IPSR_DATA(IP3_31_28, A16),
784 PINMUX_IPSR_DATA(IP3_31_28, LCDOUT8),
785 PINMUX_IPSR_DATA(IP3_31_28, VI4_FIELD),
786 PINMUX_IPSR_DATA(IP3_31_28, DU_DG0),
787
788 /* IPSR4 */
789 PINMUX_IPSR_DATA(IP4_3_0, A17),
790 PINMUX_IPSR_DATA(IP4_3_0, LCDOUT9),
791 PINMUX_IPSR_DATA(IP4_3_0, VI4_VSYNC_N),
792 PINMUX_IPSR_DATA(IP4_3_0, DU_DG1),
793
794 PINMUX_IPSR_DATA(IP4_7_4, A18),
795 PINMUX_IPSR_DATA(IP4_7_4, LCDOUT10),
796 PINMUX_IPSR_DATA(IP4_7_4, VI4_HSYNC_N),
797 PINMUX_IPSR_DATA(IP4_7_4, DU_DG2),
798
799 PINMUX_IPSR_DATA(IP4_11_8, A19),
800 PINMUX_IPSR_DATA(IP4_11_8, LCDOUT11),
801 PINMUX_IPSR_DATA(IP4_11_8, VI4_CLKENB),
802 PINMUX_IPSR_DATA(IP4_11_8, DU_DG3),
803
804 PINMUX_IPSR_DATA(IP4_15_12, CS0_N),
805 PINMUX_IPSR_DATA(IP4_15_12, VI5_CLKENB),
806
807 PINMUX_IPSR_DATA(IP4_19_16, CS1_N_A26),
808 PINMUX_IPSR_DATA(IP4_19_16, VI5_CLK),
809 PINMUX_IPSR_MSEL(IP4_19_16, EX_WAIT0_B, SEL_LBSC_1),
810
811 PINMUX_IPSR_DATA(IP4_23_20, BS_N),
812 PINMUX_IPSR_DATA(IP4_23_20, QSTVA_QVS),
813 PINMUX_IPSR_MSEL(IP4_23_20, MSIOF3_SCK_D, SEL_MSIOF3_3),
814 PINMUX_IPSR_DATA(IP4_23_20, SCK3),
815 PINMUX_IPSR_DATA(IP4_23_20, HSCK3),
816 PINMUX_IPSR_DATA(IP4_23_20, CAN1_TX),
817 PINMUX_IPSR_DATA(IP4_23_20, CANFD1_TX),
818 PINMUX_IPSR_MSEL(IP4_23_20, IETX_A, SEL_IEBUS_0),
819
820 PINMUX_IPSR_DATA(IP4_27_24, RD_N),
821 PINMUX_IPSR_MSEL(IP4_27_24, MSIOF3_SYNC_D, SEL_MSIOF3_3),
822 PINMUX_IPSR_MSEL(IP4_27_24, RX3_A, SEL_SCIF3_0),
823 PINMUX_IPSR_MSEL(IP4_27_24, HRX3_A, SEL_HSCIF3_0),
824 PINMUX_IPSR_MSEL(IP4_27_24, CAN0_TX_A, SEL_RCAN0_0),
825 PINMUX_IPSR_MSEL(IP4_27_24, CANFD0_TX_A, SEL_CANFD0_0),
826
827 PINMUX_IPSR_DATA(IP4_31_28, RD_WR_N),
828 PINMUX_IPSR_MSEL(IP4_31_28, MSIOF3_RXD_D, SEL_MSIOF3_3),
829 PINMUX_IPSR_MSEL(IP4_31_28, TX3_A, SEL_SCIF3_0),
830 PINMUX_IPSR_MSEL(IP4_31_28, HTX3_A, SEL_HSCIF3_0),
831 PINMUX_IPSR_MSEL(IP4_31_28, CAN0_RX_A, SEL_RCAN0_0),
832 PINMUX_IPSR_MSEL(IP4_31_28, CANFD0_RX_A, SEL_CANFD0_0),
833
834 /* IPSR5 */
835 PINMUX_IPSR_DATA(IP5_3_0, WE0_N),
836 PINMUX_IPSR_MSEL(IP5_3_0, MSIOF3_TXD_D, SEL_MSIOF3_3),
837 PINMUX_IPSR_DATA(IP5_3_0, CTS3_N),
838 PINMUX_IPSR_DATA(IP5_3_0, HCTS3_N),
839 PINMUX_IPSR_MSEL(IP5_3_0, SCL6_B, SEL_I2C6_1),
840 PINMUX_IPSR_DATA(IP5_3_0, CAN_CLK),
841 PINMUX_IPSR_MSEL(IP5_3_0, IECLK_A, SEL_IEBUS_0),
842
843 PINMUX_IPSR_DATA(IP5_7_4, WE1_N),
844 PINMUX_IPSR_MSEL(IP5_7_4, MSIOF3_SS1_D, SEL_MSIOF3_3),
845 PINMUX_IPSR_DATA(IP5_7_4, RTS3_N_TANS),
846 PINMUX_IPSR_DATA(IP5_7_4, HRTS3_N),
847 PINMUX_IPSR_MSEL(IP5_7_4, SDA6_B, SEL_I2C6_1),
848 PINMUX_IPSR_DATA(IP5_7_4, CAN1_RX),
849 PINMUX_IPSR_DATA(IP5_7_4, CANFD1_RX),
850 PINMUX_IPSR_MSEL(IP5_7_4, IERX_A, SEL_IEBUS_0),
851
852 PINMUX_IPSR_MSEL(IP5_11_8, EX_WAIT0_A, SEL_LBSC_0),
853 PINMUX_IPSR_DATA(IP5_11_8, QCLK),
854 PINMUX_IPSR_DATA(IP5_11_8, VI4_CLK),
855 PINMUX_IPSR_DATA(IP5_11_8, DU_DOTCLKOUT0),
856
857 PINMUX_IPSR_DATA(IP5_15_12, D0),
858 PINMUX_IPSR_MSEL(IP5_15_12, MSIOF2_SS1_B, SEL_MSIOF2_1),
859 PINMUX_IPSR_MSEL(IP5_15_12, MSIOF3_SCK_A, SEL_MSIOF3_0),
860 PINMUX_IPSR_DATA(IP5_15_12, VI4_DATA16),
861 PINMUX_IPSR_DATA(IP5_15_12, VI5_DATA0),
862
863 PINMUX_IPSR_DATA(IP5_19_16, D1),
864 PINMUX_IPSR_MSEL(IP5_19_16, MSIOF2_SS2_B, SEL_MSIOF2_1),
865 PINMUX_IPSR_MSEL(IP5_19_16, MSIOF3_SYNC_A, SEL_MSIOF3_0),
866 PINMUX_IPSR_DATA(IP5_19_16, VI4_DATA17),
867 PINMUX_IPSR_DATA(IP5_19_16, VI5_DATA1),
868
869 PINMUX_IPSR_DATA(IP5_23_20, D2),
870 PINMUX_IPSR_MSEL(IP5_23_20, MSIOF3_RXD_A, SEL_MSIOF3_0),
871 PINMUX_IPSR_DATA(IP5_23_20, VI4_DATA18),
872 PINMUX_IPSR_DATA(IP5_23_20, VI5_DATA2),
873
874 PINMUX_IPSR_DATA(IP5_27_24, D3),
875 PINMUX_IPSR_MSEL(IP5_27_24, MSIOF3_TXD_A, SEL_MSIOF3_0),
876 PINMUX_IPSR_DATA(IP5_27_24, VI4_DATA19),
877 PINMUX_IPSR_DATA(IP5_27_24, VI5_DATA3),
878
879 PINMUX_IPSR_DATA(IP5_31_28, D4),
880 PINMUX_IPSR_MSEL(IP5_31_28, MSIOF2_SCK_B, SEL_MSIOF2_1),
881 PINMUX_IPSR_DATA(IP5_31_28, VI4_DATA20),
882 PINMUX_IPSR_DATA(IP5_31_28, VI5_DATA4),
883
884 /* IPSR6 */
885 PINMUX_IPSR_DATA(IP6_3_0, D5),
886 PINMUX_IPSR_MSEL(IP6_3_0, MSIOF2_SYNC_B, SEL_MSIOF2_1),
887 PINMUX_IPSR_DATA(IP6_3_0, VI4_DATA21),
888 PINMUX_IPSR_DATA(IP6_3_0, VI5_DATA5),
889
890 PINMUX_IPSR_DATA(IP6_7_4, D6),
891 PINMUX_IPSR_MSEL(IP6_7_4, MSIOF2_RXD_B, SEL_MSIOF2_1),
892 PINMUX_IPSR_DATA(IP6_7_4, VI4_DATA22),
893 PINMUX_IPSR_DATA(IP6_7_4, VI5_DATA6),
894
895 PINMUX_IPSR_DATA(IP6_11_8, D7),
896 PINMUX_IPSR_MSEL(IP6_11_8, MSIOF2_TXD_B, SEL_MSIOF2_1),
897 PINMUX_IPSR_DATA(IP6_11_8, VI4_DATA23),
898 PINMUX_IPSR_DATA(IP6_11_8, VI5_DATA7),
899
900 PINMUX_IPSR_DATA(IP6_15_12, D8),
901 PINMUX_IPSR_DATA(IP6_15_12, LCDOUT0),
902 PINMUX_IPSR_MSEL(IP6_15_12, MSIOF2_SCK_D, SEL_MSIOF2_3),
903 PINMUX_IPSR_MSEL(IP6_15_12, SCK4_C, SEL_SCIF4_2),
904 PINMUX_IPSR_MSEL(IP6_15_12, VI4_DATA0_A, SEL_VIN4_0),
905 PINMUX_IPSR_DATA(IP6_15_12, DU_DR0),
906
907 PINMUX_IPSR_DATA(IP6_19_16, D9),
908 PINMUX_IPSR_DATA(IP6_19_16, LCDOUT1),
909 PINMUX_IPSR_MSEL(IP6_19_16, MSIOF2_SYNC_D, SEL_MSIOF2_3),
910 PINMUX_IPSR_MSEL(IP6_19_16, VI4_DATA1_A, SEL_VIN4_0),
911 PINMUX_IPSR_DATA(IP6_19_16, DU_DR1),
912
913 PINMUX_IPSR_DATA(IP6_23_20, D10),
914 PINMUX_IPSR_DATA(IP6_23_20, LCDOUT2),
915 PINMUX_IPSR_MSEL(IP6_23_20, MSIOF2_RXD_D, SEL_MSIOF2_3),
916 PINMUX_IPSR_MSEL(IP6_23_20, HRX3_B, SEL_HSCIF3_1),
917 PINMUX_IPSR_MSEL(IP6_23_20, VI4_DATA2_A, SEL_VIN4_0),
918 PINMUX_IPSR_MSEL(IP6_23_20, CTS4_N_C, SEL_SCIF4_2),
919 PINMUX_IPSR_DATA(IP6_23_20, DU_DR2),
920
921 PINMUX_IPSR_DATA(IP6_27_24, D11),
922 PINMUX_IPSR_DATA(IP6_27_24, LCDOUT3),
923 PINMUX_IPSR_MSEL(IP6_27_24, MSIOF2_TXD_D, SEL_MSIOF2_3),
924 PINMUX_IPSR_MSEL(IP6_27_24, HTX3_B, SEL_HSCIF3_1),
925 PINMUX_IPSR_MSEL(IP6_27_24, VI4_DATA3_A, SEL_VIN4_0),
926 PINMUX_IPSR_MSEL(IP6_27_24, RTS4_N_TANS_C, SEL_SCIF4_2),
927 PINMUX_IPSR_DATA(IP6_27_24, DU_DR3),
928
929 PINMUX_IPSR_DATA(IP6_31_28, D12),
930 PINMUX_IPSR_DATA(IP6_31_28, LCDOUT4),
931 PINMUX_IPSR_MSEL(IP6_31_28, MSIOF2_SS1_D, SEL_MSIOF2_3),
932 PINMUX_IPSR_MSEL(IP6_31_28, RX4_C, SEL_SCIF4_2),
933 PINMUX_IPSR_MSEL(IP6_31_28, VI4_DATA4_A, SEL_VIN4_0),
934 PINMUX_IPSR_DATA(IP6_31_28, DU_DR4),
935
936 /* IPSR7 */
937 PINMUX_IPSR_DATA(IP7_3_0, D13),
938 PINMUX_IPSR_DATA(IP7_3_0, LCDOUT5),
939 PINMUX_IPSR_MSEL(IP7_3_0, MSIOF2_SS2_D, SEL_MSIOF2_3),
940 PINMUX_IPSR_MSEL(IP7_3_0, TX4_C, SEL_SCIF4_2),
941 PINMUX_IPSR_MSEL(IP7_3_0, VI4_DATA5_A, SEL_VIN4_0),
942 PINMUX_IPSR_DATA(IP7_3_0, DU_DR5),
943
944 PINMUX_IPSR_DATA(IP7_7_4, D14),
945 PINMUX_IPSR_DATA(IP7_7_4, LCDOUT6),
946 PINMUX_IPSR_MSEL(IP7_7_4, MSIOF3_SS1_A, SEL_MSIOF3_0),
947 PINMUX_IPSR_MSEL(IP7_7_4, HRX3_C, SEL_HSCIF3_2),
948 PINMUX_IPSR_MSEL(IP7_7_4, VI4_DATA6_A, SEL_VIN4_0),
949 PINMUX_IPSR_DATA(IP7_7_4, DU_DR6),
950 PINMUX_IPSR_MSEL(IP7_7_4, SCL6_C, SEL_I2C6_2),
951
952 PINMUX_IPSR_DATA(IP7_11_8, D15),
953 PINMUX_IPSR_DATA(IP7_11_8, LCDOUT7),
954 PINMUX_IPSR_MSEL(IP7_11_8, MSIOF3_SS2_A, SEL_MSIOF3_0),
955 PINMUX_IPSR_MSEL(IP7_11_8, HTX3_C, SEL_HSCIF3_2),
956 PINMUX_IPSR_MSEL(IP7_11_8, VI4_DATA7_A, SEL_VIN4_0),
957 PINMUX_IPSR_DATA(IP7_11_8, DU_DR7),
958 PINMUX_IPSR_MSEL(IP7_11_8, SDA6_C, SEL_I2C6_2),
959
960 PINMUX_IPSR_DATA(IP7_15_12, FSCLKST),
961
962 PINMUX_IPSR_DATA(IP7_19_16, SD0_CLK),
963 PINMUX_IPSR_MSEL(IP7_19_16, MSIOF1_SCK_E, SEL_MSIOF1_4),
964 PINMUX_IPSR_MSEL(IP7_19_16, STP_OPWM_0_B, SEL_SSP1_0_1),
965
966 PINMUX_IPSR_DATA(IP7_23_20, SD0_CMD),
967 PINMUX_IPSR_MSEL(IP7_23_20, MSIOF1_SYNC_E, SEL_MSIOF1_4),
968 PINMUX_IPSR_MSEL(IP7_23_20, STP_IVCXO27_0_B, SEL_SSP1_0_1),
969
970 PINMUX_IPSR_DATA(IP7_27_24, SD0_DAT0),
971 PINMUX_IPSR_MSEL(IP7_27_24, MSIOF1_RXD_E, SEL_MSIOF1_4),
972 PINMUX_IPSR_MSEL(IP7_27_24, TS_SCK0_B, SEL_TSIF0_1),
973 PINMUX_IPSR_MSEL(IP7_27_24, STP_ISCLK_0_B, SEL_SSP1_0_1),
974
975 PINMUX_IPSR_DATA(IP7_31_28, SD0_DAT1),
976 PINMUX_IPSR_MSEL(IP7_31_28, MSIOF1_TXD_E, SEL_MSIOF1_4),
977 PINMUX_IPSR_MSEL(IP7_31_28, TS_SPSYNC0_B, SEL_TSIF0_1),
978 PINMUX_IPSR_MSEL(IP7_31_28, STP_ISSYNC_0_B, SEL_SSP1_0_1),
979
980 /* IPSR8 */
981 PINMUX_IPSR_DATA(IP8_3_0, SD0_DAT2),
982 PINMUX_IPSR_MSEL(IP8_3_0, MSIOF1_SS1_E, SEL_MSIOF1_4),
983 PINMUX_IPSR_MSEL(IP8_3_0, TS_SDAT0_B, SEL_TSIF0_1),
984 PINMUX_IPSR_MSEL(IP8_3_0, STP_ISD_0_B, SEL_SSP1_0_1),
985
986 PINMUX_IPSR_DATA(IP8_7_4, SD0_DAT3),
987 PINMUX_IPSR_MSEL(IP8_7_4, MSIOF1_SS2_E, SEL_MSIOF1_4),
988 PINMUX_IPSR_MSEL(IP8_7_4, TS_SDEN0_B, SEL_TSIF0_1),
989 PINMUX_IPSR_MSEL(IP8_7_4, STP_ISEN_0_B, SEL_SSP1_0_1),
990
991 PINMUX_IPSR_DATA(IP8_11_8, SD1_CLK),
992 PINMUX_IPSR_MSEL(IP8_11_8, MSIOF1_SCK_G, SEL_MSIOF1_6),
993 PINMUX_IPSR_MSEL(IP8_11_8, SIM0_CLK_A, SEL_SIMCARD_0),
994
995 PINMUX_IPSR_DATA(IP8_15_12, SD1_CMD),
996 PINMUX_IPSR_MSEL(IP8_15_12, MSIOF1_SYNC_G, SEL_MSIOF1_6),
997 PINMUX_IPSR_MSEL(IP8_15_12, SIM0_D_A, SEL_SIMCARD_0),
998 PINMUX_IPSR_MSEL(IP8_15_12, STP_IVCXO27_1_B, SEL_SSP1_1_1),
999
1000 PINMUX_IPSR_DATA(IP8_19_16, SD1_DAT0),
1001 PINMUX_IPSR_DATA(IP8_19_16, SD2_DAT4),
1002 PINMUX_IPSR_MSEL(IP8_19_16, MSIOF1_RXD_G, SEL_MSIOF1_6),
1003 PINMUX_IPSR_MSEL(IP8_19_16, TS_SCK1_B, SEL_TSIF1_1),
1004 PINMUX_IPSR_MSEL(IP8_19_16, STP_ISCLK_1_B, SEL_SSP1_1_1),
1005
1006 PINMUX_IPSR_DATA(IP8_23_20, SD1_DAT1),
1007 PINMUX_IPSR_DATA(IP8_23_20, SD2_DAT5),
1008 PINMUX_IPSR_MSEL(IP8_23_20, MSIOF1_TXD_G, SEL_MSIOF1_6),
1009 PINMUX_IPSR_MSEL(IP8_23_20, TS_SPSYNC1_B, SEL_TSIF1_1),
1010 PINMUX_IPSR_MSEL(IP8_23_20, STP_ISSYNC_1_B, SEL_SSP1_1_1),
1011
1012 PINMUX_IPSR_DATA(IP8_27_24, SD1_DAT2),
1013 PINMUX_IPSR_DATA(IP8_27_24, SD2_DAT6),
1014 PINMUX_IPSR_MSEL(IP8_27_24, MSIOF1_SS1_G, SEL_MSIOF1_6),
1015 PINMUX_IPSR_MSEL(IP8_27_24, TS_SDAT1_B, SEL_TSIF1_1),
1016 PINMUX_IPSR_MSEL(IP8_27_24, STP_ISD_1_B, SEL_SSP1_1_1),
1017
1018 PINMUX_IPSR_DATA(IP8_31_28, SD1_DAT3),
1019 PINMUX_IPSR_DATA(IP8_31_28, SD2_DAT7),
1020 PINMUX_IPSR_MSEL(IP8_31_28, MSIOF1_SS2_G, SEL_MSIOF1_6),
1021 PINMUX_IPSR_MSEL(IP8_31_28, TS_SDEN1_B, SEL_TSIF1_1),
1022 PINMUX_IPSR_MSEL(IP8_31_28, STP_ISEN_1_B, SEL_SSP1_1_1),
1023
1024 /* IPSR9 */
1025 PINMUX_IPSR_DATA(IP9_3_0, SD2_CLK),
1026
1027 PINMUX_IPSR_DATA(IP9_7_4, SD2_DAT0),
1028
1029 PINMUX_IPSR_DATA(IP9_11_8, SD2_DAT1),
1030
1031 PINMUX_IPSR_DATA(IP9_15_12, SD2_DAT2),
1032
1033 PINMUX_IPSR_DATA(IP9_19_16, SD2_DAT3),
1034
1035 PINMUX_IPSR_DATA(IP9_23_20, SD2_DS),
1036 PINMUX_IPSR_MSEL(IP9_23_20, SATA_DEVSLP_B, SEL_SCIF_1),
1037
1038 PINMUX_IPSR_DATA(IP9_27_24, SD3_DAT4),
1039 PINMUX_IPSR_MSEL(IP9_27_24, SD2_CD_A, SEL_SDHI2_0),
1040
1041 PINMUX_IPSR_DATA(IP9_31_28, SD3_DAT5),
1042 PINMUX_IPSR_MSEL(IP9_31_28, SD2_WP_A, SEL_SDHI2_0),
1043
1044 /* IPSR10 */
1045 PINMUX_IPSR_DATA(IP10_3_0, SD3_DAT6),
1046 PINMUX_IPSR_DATA(IP10_3_0, SD3_CD),
1047
1048 PINMUX_IPSR_DATA(IP10_7_4, SD3_DAT7),
1049 PINMUX_IPSR_DATA(IP10_7_4, SD3_WP),
1050
1051 PINMUX_IPSR_DATA(IP10_11_8, SD0_CD),
1052 PINMUX_IPSR_MSEL(IP10_11_8, SCL2_B, SEL_I2C2_1),
1053 PINMUX_IPSR_MSEL(IP10_11_8, SIM0_RST_A, SEL_SIMCARD_0),
1054
1055 PINMUX_IPSR_DATA(IP10_15_12, SD0_WP),
1056 PINMUX_IPSR_MSEL(IP10_15_12, SDA2_B, SEL_I2C2_1),
1057
1058 PINMUX_IPSR_DATA(IP10_19_16, SD1_CD),
1059 PINMUX_IPSR_MSEL(IP10_19_16, SIM0_CLK_B, SEL_SIMCARD_1),
1060
1061 PINMUX_IPSR_DATA(IP10_23_20, SD1_WP),
1062 PINMUX_IPSR_MSEL(IP10_23_20, SIM0_D_B, SEL_SIMCARD_1),
1063
1064 PINMUX_IPSR_DATA(IP10_27_24, SCK0),
1065 PINMUX_IPSR_MSEL(IP10_27_24, HSCK1_B, SEL_HSCIF1_1),
1066 PINMUX_IPSR_MSEL(IP10_27_24, MSIOF1_SS2_B, SEL_MSIOF1_1),
1067 PINMUX_IPSR_MSEL(IP10_27_24, AUDIO_CLKC_B, SEL_ADG_1),
1068 PINMUX_IPSR_MSEL(IP10_27_24, SDA2_A, SEL_I2C2_0),
1069 PINMUX_IPSR_MSEL(IP10_27_24, SIM0_RST_B, SEL_SIMCARD_1),
1070 PINMUX_IPSR_MSEL(IP10_27_24, STP_OPWM_0_C, SEL_SSP1_0_2),
1071 PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
1072 PINMUX_IPSR_DATA(IP10_27_24, ADICHS2),
1073
1074 PINMUX_IPSR_DATA(IP10_31_28, RX0),
1075 PINMUX_IPSR_MSEL(IP10_31_28, HRX1_B, SEL_HSCIF1_1),
1076 PINMUX_IPSR_MSEL(IP10_31_28, TS_SCK0_C, SEL_TSIF0_2),
1077 PINMUX_IPSR_MSEL(IP10_31_28, STP_ISCLK_0_C, SEL_SSP1_0_2),
1078 PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
1079
1080 /* IPSR11 */
1081 PINMUX_IPSR_DATA(IP11_3_0, TX0),
1082 PINMUX_IPSR_MSEL(IP11_3_0, HTX1_B, SEL_HSCIF1_1),
1083 PINMUX_IPSR_MSEL(IP11_3_0, TS_SPSYNC0_C, SEL_TSIF0_2),
1084 PINMUX_IPSR_MSEL(IP11_3_0, STP_ISSYNC_0_C, SEL_SSP1_0_2),
1085 PINMUX_IPSR_MSEL(IP11_3_0, RIF0_D1_B, SEL_DRIF0_1),
1086
1087 PINMUX_IPSR_DATA(IP11_7_4, CTS0_N),
1088 PINMUX_IPSR_MSEL(IP11_7_4, HCTS1_N_B, SEL_HSCIF1_1),
1089 PINMUX_IPSR_MSEL(IP11_7_4, MSIOF1_SYNC_B, SEL_MSIOF1_1),
1090 PINMUX_IPSR_MSEL(IP11_7_4, TS_SPSYNC1_C, SEL_TSIF1_2),
1091 PINMUX_IPSR_MSEL(IP11_7_4, STP_ISSYNC_1_C, SEL_SSP1_1_2),
1092 PINMUX_IPSR_MSEL(IP11_7_4, RIF1_SYNC_B, SEL_DRIF1_1),
1093 PINMUX_IPSR_MSEL(IP11_7_4, AUDIO_CLKOUT_C, SEL_ADG_2),
1094 PINMUX_IPSR_DATA(IP11_7_4, ADICS_SAMP),
1095
1096 PINMUX_IPSR_DATA(IP11_11_8, RTS0_N_TANS),
1097 PINMUX_IPSR_MSEL(IP11_11_8, HRTS1_N_B, SEL_HSCIF1_1),
1098 PINMUX_IPSR_MSEL(IP11_11_8, MSIOF1_SS1_B, SEL_MSIOF1_1),
1099 PINMUX_IPSR_MSEL(IP11_11_8, AUDIO_CLKA_B, SEL_ADG_1),
1100 PINMUX_IPSR_MSEL(IP11_11_8, SCL2_A, SEL_I2C2_0),
1101 PINMUX_IPSR_MSEL(IP11_11_8, STP_IVCXO27_1_C, SEL_SSP1_1_2),
1102 PINMUX_IPSR_MSEL(IP11_11_8, RIF0_SYNC_B, SEL_DRIF0_1),
1103 PINMUX_IPSR_DATA(IP11_11_8, ADICHS1),
1104
1105 PINMUX_IPSR_MSEL(IP11_15_12, RX1_A, SEL_SCIF1_0),
1106 PINMUX_IPSR_MSEL(IP11_15_12, HRX1_A, SEL_HSCIF1_0),
1107 PINMUX_IPSR_MSEL(IP11_15_12, TS_SDAT0_C, SEL_TSIF0_2),
1108 PINMUX_IPSR_MSEL(IP11_15_12, STP_ISD_0_C, SEL_SSP1_0_2),
1109 PINMUX_IPSR_MSEL(IP11_15_12, RIF1_CLK_C, SEL_DRIF1_2),
1110
1111 PINMUX_IPSR_MSEL(IP11_19_16, TX1_A, SEL_SCIF1_0),
1112 PINMUX_IPSR_MSEL(IP11_19_16, HTX1_A, SEL_HSCIF1_0),
1113 PINMUX_IPSR_MSEL(IP11_19_16, TS_SDEN0_C, SEL_TSIF0_2),
1114 PINMUX_IPSR_MSEL(IP11_19_16, STP_ISEN_0_C, SEL_SSP1_0_2),
1115 PINMUX_IPSR_MSEL(IP11_19_16, RIF1_D0_C, SEL_DRIF1_2),
1116
1117 PINMUX_IPSR_DATA(IP11_23_20, CTS1_N),
1118 PINMUX_IPSR_MSEL(IP11_23_20, HCTS1_N_A, SEL_HSCIF1_0),
1119 PINMUX_IPSR_MSEL(IP11_23_20, MSIOF1_RXD_B, SEL_MSIOF1_1),
1120 PINMUX_IPSR_MSEL(IP11_23_20, TS_SDEN1_C, SEL_TSIF1_2),
1121 PINMUX_IPSR_MSEL(IP11_23_20, STP_ISEN_1_C, SEL_SSP1_1_2),
1122 PINMUX_IPSR_MSEL(IP11_23_20, RIF1_D0_B, SEL_DRIF1_1),
1123 PINMUX_IPSR_DATA(IP11_23_20, ADIDATA),
1124
1125 PINMUX_IPSR_DATA(IP11_27_24, RTS1_N_TANS),
1126 PINMUX_IPSR_MSEL(IP11_27_24, HRTS1_N_A, SEL_HSCIF1_0),
1127 PINMUX_IPSR_MSEL(IP11_27_24, MSIOF1_TXD_B, SEL_MSIOF1_1),
1128 PINMUX_IPSR_MSEL(IP11_27_24, TS_SDAT1_C, SEL_TSIF1_2),
1129 PINMUX_IPSR_MSEL(IP11_27_24, STP_ISD_1_C, SEL_SSP1_1_2),
1130 PINMUX_IPSR_MSEL(IP11_27_24, RIF1_D1_B, SEL_DRIF1_1),
1131 PINMUX_IPSR_DATA(IP11_27_24, ADICHS0),
1132
1133 PINMUX_IPSR_DATA(IP11_31_28, SCK2),
1134 PINMUX_IPSR_MSEL(IP11_31_28, SCIF_CLK_B, SEL_SCIF1_1),
1135 PINMUX_IPSR_MSEL(IP11_31_28, MSIOF1_SCK_B, SEL_MSIOF1_1),
1136 PINMUX_IPSR_MSEL(IP11_31_28, TS_SCK1_C, SEL_TSIF1_2),
1137 PINMUX_IPSR_MSEL(IP11_31_28, STP_ISCLK_1_C, SEL_SSP1_1_2),
1138 PINMUX_IPSR_MSEL(IP11_31_28, RIF1_CLK_B, SEL_DRIF1_1),
1139 PINMUX_IPSR_DATA(IP11_31_28, ADICLK),
1140
1141 /* IPSR12 */
1142 PINMUX_IPSR_MSEL(IP12_3_0, TX2_A, SEL_SCIF2_0),
1143 PINMUX_IPSR_MSEL(IP12_3_0, SD2_CD_B, SEL_SDHI2_1),
1144 PINMUX_IPSR_MSEL(IP12_3_0, SCL1_A, SEL_I2C1_0),
1145 PINMUX_IPSR_MSEL(IP12_3_0, FMCLK_A, SEL_FM_0),
1146 PINMUX_IPSR_MSEL(IP12_3_0, RIF1_D1_C, SEL_DRIF1_2),
1147 PINMUX_IPSR_MSEL(IP12_3_0, FSO_CFE_0_B, SEL_FSO_1),
1148
1149 PINMUX_IPSR_MSEL(IP12_7_4, RX2_A, SEL_SCIF2_0),
1150 PINMUX_IPSR_MSEL(IP12_7_4, SD2_WP_B, SEL_SDHI2_1),
1151 PINMUX_IPSR_MSEL(IP12_7_4, SDA1_A, SEL_I2C1_0),
1152 PINMUX_IPSR_MSEL(IP12_7_4, FMIN_A, SEL_FM_0),
1153 PINMUX_IPSR_MSEL(IP12_7_4, RIF1_SYNC_C, SEL_DRIF1_2),
1154 PINMUX_IPSR_MSEL(IP12_7_4, FSO_CFE_1_B, SEL_FSO_1),
1155
1156 PINMUX_IPSR_DATA(IP12_11_8, HSCK0),
1157 PINMUX_IPSR_MSEL(IP12_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
1158 PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKB_A, SEL_ADG_0),
1159 PINMUX_IPSR_MSEL(IP12_11_8, SSI_SDATA1_B, SEL_SSI_1),
1160 PINMUX_IPSR_MSEL(IP12_11_8, TS_SCK0_D, SEL_TSIF0_3),
1161 PINMUX_IPSR_MSEL(IP12_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
1162 PINMUX_IPSR_MSEL(IP12_11_8, RIF0_CLK_C, SEL_DRIF0_2),
1163
1164 PINMUX_IPSR_DATA(IP12_15_12, HRX0),
1165 PINMUX_IPSR_MSEL(IP12_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3),
1166 PINMUX_IPSR_MSEL(IP12_15_12, SSI_SDATA2_B, SEL_SSI_1),
1167 PINMUX_IPSR_MSEL(IP12_15_12, TS_SDEN0_D, SEL_TSIF0_3),
1168 PINMUX_IPSR_MSEL(IP12_15_12, STP_ISEN_0_D, SEL_SSP1_0_3),
1169 PINMUX_IPSR_MSEL(IP12_15_12, RIF0_D0_C, SEL_DRIF0_2),
1170
1171 PINMUX_IPSR_DATA(IP12_19_16, HTX0),
1172 PINMUX_IPSR_MSEL(IP12_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3),
1173 PINMUX_IPSR_MSEL(IP12_19_16, SSI_SDATA9_B, SEL_SSI_1),
1174 PINMUX_IPSR_MSEL(IP12_19_16, TS_SDAT0_D, SEL_TSIF0_3),
1175 PINMUX_IPSR_MSEL(IP12_19_16, STP_ISD_0_D, SEL_SSP1_0_3),
1176 PINMUX_IPSR_MSEL(IP12_19_16, RIF0_D1_C, SEL_DRIF0_2),
1177
1178 PINMUX_IPSR_DATA(IP12_23_20, HCTS0_N),
1179 PINMUX_IPSR_MSEL(IP12_23_20, RX2_B, SEL_SCIF2_1),
1180 PINMUX_IPSR_MSEL(IP12_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3),
1181 PINMUX_IPSR_MSEL(IP12_23_20, SSI_SCK9_A, SEL_SSI_0),
1182 PINMUX_IPSR_MSEL(IP12_23_20, TS_SPSYNC0_D, SEL_TSIF0_3),
1183 PINMUX_IPSR_MSEL(IP12_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3),
1184 PINMUX_IPSR_MSEL(IP12_23_20, RIF0_SYNC_C, SEL_DRIF0_2),
1185 PINMUX_IPSR_MSEL(IP12_23_20, AUDIO_CLKOUT1_A, SEL_ADG_0),
1186
1187 PINMUX_IPSR_DATA(IP12_27_24, HRTS0_N),
1188 PINMUX_IPSR_MSEL(IP12_27_24, TX2_B, SEL_SCIF2_1),
1189 PINMUX_IPSR_MSEL(IP12_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3),
1190 PINMUX_IPSR_MSEL(IP12_27_24, SSI_WS9_A, SEL_SSI_0),
1191 PINMUX_IPSR_MSEL(IP12_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3),
1192 PINMUX_IPSR_MSEL(IP12_27_24, BPFCLK_A, SEL_FM_0),
1193 PINMUX_IPSR_MSEL(IP12_27_24, AUDIO_CLKOUT2_A, SEL_ADG_0),
1194
1195 PINMUX_IPSR_DATA(IP12_31_28, MSIOF0_SYNC),
1196 PINMUX_IPSR_MSEL(IP12_31_28, AUDIO_CLKOUT_A, SEL_ADG_0),
1197
1198 /* IPSR13 */
1199 PINMUX_IPSR_DATA(IP13_3_0, MSIOF0_SS1),
1200 PINMUX_IPSR_DATA(IP13_3_0, RX5),
1201 PINMUX_IPSR_MSEL(IP13_3_0, AUDIO_CLKA_C, SEL_ADG_2),
1202 PINMUX_IPSR_MSEL(IP13_3_0, SSI_SCK2_A, SEL_SSI_0),
1203 PINMUX_IPSR_MSEL(IP13_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
1204 PINMUX_IPSR_MSEL(IP13_3_0, AUDIO_CLKOUT3_A, SEL_ADG_0),
1205 PINMUX_IPSR_MSEL(IP13_3_0, TCLK1_B, SEL_TIMER_TMU_1),
1206
1207 PINMUX_IPSR_DATA(IP13_7_4, MSIOF0_SS2),
1208 PINMUX_IPSR_DATA(IP13_7_4, TX5),
1209 PINMUX_IPSR_MSEL(IP13_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
1210 PINMUX_IPSR_MSEL(IP13_7_4, AUDIO_CLKC_A, SEL_ADG_0),
1211 PINMUX_IPSR_MSEL(IP13_7_4, SSI_WS2_A, SEL_SSI_0),
1212 PINMUX_IPSR_MSEL(IP13_7_4, STP_OPWM_0_D, SEL_SSP1_0_3),
1213 PINMUX_IPSR_MSEL(IP13_7_4, AUDIO_CLKOUT_D, SEL_ADG_3),
1214 PINMUX_IPSR_MSEL(IP13_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1),
1215
1216 PINMUX_IPSR_DATA(IP13_11_8, MLB_CLK),
1217 PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_F, SEL_MSIOF1_5),
1218 PINMUX_IPSR_MSEL(IP13_11_8, SCL1_B, SEL_I2C1_1),
1219
1220 PINMUX_IPSR_DATA(IP13_15_12, MLB_SIG),
1221 PINMUX_IPSR_MSEL(IP13_15_12, RX1_B, SEL_SCIF1_1),
1222 PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_SYNC_F, SEL_MSIOF1_5),
1223 PINMUX_IPSR_MSEL(IP13_15_12, SDA1_B, SEL_I2C1_1),
1224
1225 PINMUX_IPSR_DATA(IP13_19_16, MLB_DAT),
1226 PINMUX_IPSR_MSEL(IP13_19_16, TX1_B, SEL_SCIF1_1),
1227 PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_RXD_F, SEL_MSIOF1_5),
1228
1229 PINMUX_IPSR_DATA(IP13_23_20, SSI_SCK0129),
1230 PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_TXD_F, SEL_MSIOF1_5),
1231
1232 PINMUX_IPSR_DATA(IP13_27_24, SSI_WS0129),
1233 PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_F, SEL_MSIOF1_5),
1234
1235 PINMUX_IPSR_DATA(IP13_31_28, SSI_SDATA0),
1236 PINMUX_IPSR_MSEL(IP13_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5),
1237
1238 /* IPSR14 */
1239 PINMUX_IPSR_MSEL(IP14_3_0, SSI_SDATA1_A, SEL_SSI_0),
1240
1241 PINMUX_IPSR_MSEL(IP14_7_4, SSI_SDATA2_A, SEL_SSI_0),
1242 PINMUX_IPSR_MSEL(IP14_7_4, SSI_SCK1_B, SEL_SSI_1),
1243
1244 PINMUX_IPSR_DATA(IP14_11_8, SSI_SCK34),
1245 PINMUX_IPSR_MSEL(IP14_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
1246 PINMUX_IPSR_MSEL(IP14_11_8, STP_OPWM_0_A, SEL_SSP1_0_0),
1247
1248 PINMUX_IPSR_DATA(IP14_15_12, SSI_WS34),
1249 PINMUX_IPSR_MSEL(IP14_15_12, HCTS2_N_A, SEL_HSCIF2_0),
1250 PINMUX_IPSR_MSEL(IP14_15_12, MSIOF1_SS2_A, SEL_MSIOF1_0),
1251 PINMUX_IPSR_MSEL(IP14_15_12, STP_IVCXO27_0_A, SEL_SSP1_0_0),
1252
1253 PINMUX_IPSR_DATA(IP14_19_16, SSI_SDATA3),
1254 PINMUX_IPSR_MSEL(IP14_19_16, HRTS2_N_A, SEL_HSCIF2_0),
1255 PINMUX_IPSR_MSEL(IP14_19_16, MSIOF1_TXD_A, SEL_MSIOF1_0),
1256 PINMUX_IPSR_MSEL(IP14_19_16, TS_SCK0_A, SEL_TSIF0_0),
1257 PINMUX_IPSR_MSEL(IP14_19_16, STP_ISCLK_0_A, SEL_SSP1_0_0),
1258 PINMUX_IPSR_MSEL(IP14_19_16, RIF0_D1_A, SEL_DRIF0_0),
1259 PINMUX_IPSR_MSEL(IP14_19_16, RIF2_D0_A, SEL_DRIF2_0),
1260
1261 PINMUX_IPSR_DATA(IP14_23_20, SSI_SCK4),
1262 PINMUX_IPSR_MSEL(IP14_23_20, HRX2_A, SEL_HSCIF2_0),
1263 PINMUX_IPSR_MSEL(IP14_23_20, MSIOF1_SCK_A, SEL_MSIOF1_0),
1264 PINMUX_IPSR_MSEL(IP14_23_20, TS_SDAT0_A, SEL_TSIF0_0),
1265 PINMUX_IPSR_MSEL(IP14_23_20, STP_ISD_0_A, SEL_SSP1_0_0),
1266 PINMUX_IPSR_MSEL(IP14_23_20, RIF0_CLK_A, SEL_DRIF0_0),
1267 PINMUX_IPSR_MSEL(IP14_23_20, RIF2_CLK_A, SEL_DRIF2_0),
1268
1269 PINMUX_IPSR_DATA(IP14_27_24, SSI_WS4),
1270 PINMUX_IPSR_MSEL(IP14_27_24, HTX2_A, SEL_HSCIF2_0),
1271 PINMUX_IPSR_MSEL(IP14_27_24, MSIOF1_SYNC_A, SEL_MSIOF1_0),
1272 PINMUX_IPSR_MSEL(IP14_27_24, TS_SDEN0_A, SEL_TSIF0_0),
1273 PINMUX_IPSR_MSEL(IP14_27_24, STP_ISEN_0_A, SEL_SSP1_0_0),
1274 PINMUX_IPSR_MSEL(IP14_27_24, RIF0_SYNC_A, SEL_DRIF0_0),
1275 PINMUX_IPSR_MSEL(IP14_27_24, RIF2_SYNC_A, SEL_DRIF2_0),
1276
1277 PINMUX_IPSR_DATA(IP14_31_28, SSI_SDATA4),
1278 PINMUX_IPSR_MSEL(IP14_31_28, HSCK2_A, SEL_HSCIF2_0),
1279 PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_RXD_A, SEL_MSIOF1_0),
1280 PINMUX_IPSR_MSEL(IP14_31_28, TS_SPSYNC0_A, SEL_TSIF0_0),
1281 PINMUX_IPSR_MSEL(IP14_31_28, STP_ISSYNC_0_A, SEL_SSP1_0_0),
1282 PINMUX_IPSR_MSEL(IP14_31_28, RIF0_D0_A, SEL_DRIF0_0),
1283 PINMUX_IPSR_MSEL(IP14_31_28, RIF2_D1_A, SEL_DRIF2_0),
1284
1285 /* IPSR15 */
1286 PINMUX_IPSR_DATA(IP15_3_0, SSI_SCK6),
1287 PINMUX_IPSR_DATA(IP15_3_0, USB2_PWEN),
1288 PINMUX_IPSR_MSEL(IP15_3_0, SIM0_RST_D, SEL_SIMCARD_3),
1289
1290 PINMUX_IPSR_DATA(IP15_7_4, SSI_WS6),
1291 PINMUX_IPSR_DATA(IP15_7_4, USB2_OVC),
1292 PINMUX_IPSR_MSEL(IP15_7_4, SIM0_D_D, SEL_SIMCARD_3),
1293
1294 PINMUX_IPSR_DATA(IP15_11_8, SSI_SDATA6),
1295 PINMUX_IPSR_MSEL(IP15_11_8, SIM0_CLK_D, SEL_SIMCARD_3),
1296 PINMUX_IPSR_MSEL(IP15_11_8, SATA_DEVSLP_A, SEL_SCIF_0),
1297
1298 PINMUX_IPSR_DATA(IP15_15_12, SSI_SCK78),
1299 PINMUX_IPSR_MSEL(IP15_15_12, HRX2_B, SEL_HSCIF2_1),
1300 PINMUX_IPSR_MSEL(IP15_15_12, MSIOF1_SCK_C, SEL_MSIOF1_2),
1301 PINMUX_IPSR_MSEL(IP15_15_12, TS_SCK1_A, SEL_TSIF1_0),
1302 PINMUX_IPSR_MSEL(IP15_15_12, STP_ISCLK_1_A, SEL_SSP1_1_0),
1303 PINMUX_IPSR_MSEL(IP15_15_12, RIF1_CLK_A, SEL_DRIF1_0),
1304 PINMUX_IPSR_MSEL(IP15_15_12, RIF3_CLK_A, SEL_DRIF3_0),
1305
1306 PINMUX_IPSR_DATA(IP15_19_16, SSI_WS78),
1307 PINMUX_IPSR_MSEL(IP15_19_16, HTX2_B, SEL_HSCIF2_1),
1308 PINMUX_IPSR_MSEL(IP15_19_16, MSIOF1_SYNC_C, SEL_MSIOF1_2),
1309 PINMUX_IPSR_MSEL(IP15_19_16, TS_SDAT1_A, SEL_TSIF1_0),
1310 PINMUX_IPSR_MSEL(IP15_19_16, STP_ISD_1_A, SEL_SSP1_1_0),
1311 PINMUX_IPSR_MSEL(IP15_19_16, RIF1_SYNC_A, SEL_DRIF1_0),
1312 PINMUX_IPSR_MSEL(IP15_19_16, RIF3_SYNC_A, SEL_DRIF3_0),
1313
1314 PINMUX_IPSR_DATA(IP15_23_20, SSI_SDATA7),
1315 PINMUX_IPSR_MSEL(IP15_23_20, HCTS2_N_B, SEL_HSCIF2_1),
1316 PINMUX_IPSR_MSEL(IP15_23_20, MSIOF1_RXD_C, SEL_MSIOF1_2),
1317 PINMUX_IPSR_MSEL(IP15_23_20, TS_SDEN1_A, SEL_TSIF1_0),
1318 PINMUX_IPSR_MSEL(IP15_23_20, STP_ISEN_1_A, SEL_SSP1_1_0),
1319 PINMUX_IPSR_MSEL(IP15_23_20, RIF1_D0_A, SEL_DRIF1_0),
1320 PINMUX_IPSR_MSEL(IP15_23_20, RIF3_D0_A, SEL_DRIF3_0),
1321 PINMUX_IPSR_MSEL(IP15_23_20, TCLK2_A, SEL_TIMER_TMU_0),
1322
1323 PINMUX_IPSR_DATA(IP15_27_24, SSI_SDATA8),
1324 PINMUX_IPSR_MSEL(IP15_27_24, HRTS2_N_B, SEL_HSCIF2_1),
1325 PINMUX_IPSR_MSEL(IP15_27_24, MSIOF1_TXD_C, SEL_MSIOF1_2),
1326 PINMUX_IPSR_MSEL(IP15_27_24, TS_SPSYNC1_A, SEL_TSIF1_0),
1327 PINMUX_IPSR_MSEL(IP15_27_24, STP_ISSYNC_1_A, SEL_SSP1_1_0),
1328 PINMUX_IPSR_MSEL(IP15_27_24, RIF1_D1_A, SEL_DRIF1_0),
1329 PINMUX_IPSR_MSEL(IP15_27_24, RIF3_D1_A, SEL_DRIF3_0),
1330
1331 PINMUX_IPSR_MSEL(IP15_31_28, SSI_SDATA9_A, SEL_SSI_0),
1332 PINMUX_IPSR_MSEL(IP15_31_28, HSCK2_B, SEL_HSCIF2_1),
1333 PINMUX_IPSR_MSEL(IP15_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2),
1334 PINMUX_IPSR_MSEL(IP15_31_28, HSCK1_A, SEL_HSCIF1_0),
1335 PINMUX_IPSR_MSEL(IP15_31_28, SSI_WS1_B, SEL_SSI_1),
1336 PINMUX_IPSR_DATA(IP15_31_28, SCK1),
1337 PINMUX_IPSR_MSEL(IP15_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0),
1338 PINMUX_IPSR_DATA(IP15_31_28, SCK5),
1339
1340 /* IPSR16 */
1341 PINMUX_IPSR_MSEL(IP16_3_0, AUDIO_CLKA_A, SEL_ADG_0),
1342 PINMUX_IPSR_DATA(IP16_3_0, CC5_OSCOUT),
1343
1344 PINMUX_IPSR_MSEL(IP16_7_4, AUDIO_CLKB_B, SEL_ADG_1),
1345 PINMUX_IPSR_MSEL(IP16_7_4, SCIF_CLK_A, SEL_SCIF1_0),
1346 PINMUX_IPSR_MSEL(IP16_7_4, STP_IVCXO27_1_D, SEL_SSP1_1_3),
1347 PINMUX_IPSR_MSEL(IP16_7_4, REMOCON_A, SEL_REMOCON_0),
1348 PINMUX_IPSR_MSEL(IP16_7_4, TCLK1_A, SEL_TIMER_TMU_0),
1349
1350 PINMUX_IPSR_DATA(IP16_11_8, USB0_PWEN),
1351 PINMUX_IPSR_MSEL(IP16_11_8, SIM0_RST_C, SEL_SIMCARD_2),
1352 PINMUX_IPSR_MSEL(IP16_11_8, TS_SCK1_D, SEL_TSIF1_3),
1353 PINMUX_IPSR_MSEL(IP16_11_8, STP_ISCLK_1_D, SEL_SSP1_1_3),
1354 PINMUX_IPSR_MSEL(IP16_11_8, BPFCLK_B, SEL_FM_1),
1355 PINMUX_IPSR_MSEL(IP16_11_8, RIF3_CLK_B, SEL_DRIF3_1),
1356
1357 PINMUX_IPSR_DATA(IP16_15_12, USB0_OVC),
1358	PINMUX_IPSR_MSEL(IP16_15_12, SIM0_D_C, SEL_SIMCARD_2),
1359	PINMUX_IPSR_MSEL(IP16_15_12, TS_SDAT1_D, SEL_TSIF1_3),
1360	PINMUX_IPSR_MSEL(IP16_15_12, STP_ISD_1_D, SEL_SSP1_1_3),
1361	PINMUX_IPSR_MSEL(IP16_15_12, RIF3_SYNC_B, SEL_DRIF3_1),
1362
1363 PINMUX_IPSR_DATA(IP16_19_16, USB1_PWEN),
1364 PINMUX_IPSR_MSEL(IP16_19_16, SIM0_CLK_C, SEL_SIMCARD_2),
1365 PINMUX_IPSR_MSEL(IP16_19_16, SSI_SCK1_A, SEL_SSI_0),
1366 PINMUX_IPSR_MSEL(IP16_19_16, TS_SCK0_E, SEL_TSIF0_4),
1367 PINMUX_IPSR_MSEL(IP16_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4),
1368 PINMUX_IPSR_MSEL(IP16_19_16, FMCLK_B, SEL_FM_1),
1369 PINMUX_IPSR_MSEL(IP16_19_16, RIF2_CLK_B, SEL_DRIF2_1),
1370 PINMUX_IPSR_MSEL(IP16_19_16, SPEEDIN_A, SEL_SPEED_PULSE_0),
1371
1372 PINMUX_IPSR_DATA(IP16_23_20, USB1_OVC),
1373 PINMUX_IPSR_MSEL(IP16_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2),
1374 PINMUX_IPSR_MSEL(IP16_23_20, SSI_WS1_A, SEL_SSI_0),
1375 PINMUX_IPSR_MSEL(IP16_23_20, TS_SDAT0_E, SEL_TSIF0_4),
1376 PINMUX_IPSR_MSEL(IP16_23_20, STP_ISD_0_E, SEL_SSP1_0_4),
1377 PINMUX_IPSR_MSEL(IP16_23_20, FMIN_B, SEL_FM_1),
1378 PINMUX_IPSR_MSEL(IP16_23_20, RIF2_SYNC_B, SEL_DRIF2_1),
1379 PINMUX_IPSR_MSEL(IP16_23_20, REMOCON_B, SEL_REMOCON_1),
1380
1381 PINMUX_IPSR_DATA(IP16_27_24, USB30_PWEN),
1382 PINMUX_IPSR_MSEL(IP16_27_24, AUDIO_CLKOUT_B, SEL_ADG_1),
1383 PINMUX_IPSR_MSEL(IP16_27_24, SSI_SCK2_B, SEL_SSI_1),
1384 PINMUX_IPSR_MSEL(IP16_27_24, TS_SDEN1_D, SEL_TSIF1_3),
1385	PINMUX_IPSR_MSEL(IP16_27_24, STP_ISEN_1_D, SEL_SSP1_1_3),
1386 PINMUX_IPSR_MSEL(IP16_27_24, STP_OPWM_0_E, SEL_SSP1_0_4),
1387 PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D0_B, SEL_DRIF3_1),
1388 PINMUX_IPSR_MSEL(IP16_27_24, TCLK2_B, SEL_TIMER_TMU_1),
1389 PINMUX_IPSR_DATA(IP16_27_24, TPU0TO0),
1390
1391 PINMUX_IPSR_DATA(IP16_31_28, USB30_OVC),
1392 PINMUX_IPSR_MSEL(IP16_31_28, AUDIO_CLKOUT1_B, SEL_ADG_1),
1393 PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS2_B, SEL_SSI_1),
1394 PINMUX_IPSR_MSEL(IP16_31_28, TS_SPSYNC1_D, SEL_TSIF1_3),
1395 PINMUX_IPSR_MSEL(IP16_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3),
1396 PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4),
1397 PINMUX_IPSR_MSEL(IP16_31_28, RIF3_D1_B, SEL_DRIF3_1),
1398 PINMUX_IPSR_MSEL(IP16_31_28, FSO_TOE_B, SEL_FSO_1),
1399 PINMUX_IPSR_DATA(IP16_31_28, TPU0TO1),
1400
1401 /* IPSR17 */
1402 PINMUX_IPSR_DATA(IP17_3_0, USB31_PWEN),
1403 PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKOUT2_B, SEL_ADG_1),
1404 PINMUX_IPSR_MSEL(IP17_3_0, SSI_SCK9_B, SEL_SSI_1),
1405 PINMUX_IPSR_MSEL(IP17_3_0, TS_SDEN0_E, SEL_TSIF0_4),
1406 PINMUX_IPSR_MSEL(IP17_3_0, STP_ISEN_0_E, SEL_SSP1_0_4),
1407 PINMUX_IPSR_MSEL(IP17_3_0, RIF2_D0_B, SEL_DRIF2_1),
1408 PINMUX_IPSR_DATA(IP17_3_0, TPU0TO2),
1409
1410 PINMUX_IPSR_DATA(IP17_7_4, USB31_OVC),
1411 PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKOUT3_B, SEL_ADG_1),
1412 PINMUX_IPSR_MSEL(IP17_7_4, SSI_WS9_B, SEL_SSI_1),
1413 PINMUX_IPSR_MSEL(IP17_7_4, TS_SPSYNC0_E, SEL_TSIF0_4),
1414 PINMUX_IPSR_MSEL(IP17_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
1415 PINMUX_IPSR_MSEL(IP17_7_4, RIF2_D1_B, SEL_DRIF2_1),
1416 PINMUX_IPSR_DATA(IP17_7_4, TPU0TO3),
1417
1418 /* I2C */
1419 PINMUX_IPSR_NOGP(0, I2C_SEL_0_1),
1420 PINMUX_IPSR_NOGP(0, I2C_SEL_3_1),
1421 PINMUX_IPSR_NOGP(0, I2C_SEL_5_1),
1422};
1423
1424static const struct sh_pfc_pin pinmux_pins[] = {
1425 PINMUX_GPIO_GP_ALL(),
1426};
1427
1428/* - AUDIO CLOCK ------------------------------------------------------------ */
1429static const unsigned int audio_clk_a_a_pins[] = {
1430 /* CLK A */
1431 RCAR_GP_PIN(6, 22),
1432};
1433static const unsigned int audio_clk_a_a_mux[] = {
1434 AUDIO_CLKA_A_MARK,
1435};
1436static const unsigned int audio_clk_a_b_pins[] = {
1437 /* CLK A */
1438 RCAR_GP_PIN(5, 4),
1439};
1440static const unsigned int audio_clk_a_b_mux[] = {
1441 AUDIO_CLKA_B_MARK,
1442};
1443static const unsigned int audio_clk_a_c_pins[] = {
1444 /* CLK A */
1445 RCAR_GP_PIN(5, 19),
1446};
1447static const unsigned int audio_clk_a_c_mux[] = {
1448 AUDIO_CLKA_C_MARK,
1449};
1450static const unsigned int audio_clk_b_a_pins[] = {
1451 /* CLK B */
1452 RCAR_GP_PIN(5, 12),
1453};
1454static const unsigned int audio_clk_b_a_mux[] = {
1455 AUDIO_CLKB_A_MARK,
1456};
1457static const unsigned int audio_clk_b_b_pins[] = {
1458 /* CLK B */
1459 RCAR_GP_PIN(6, 23),
1460};
1461static const unsigned int audio_clk_b_b_mux[] = {
1462 AUDIO_CLKB_B_MARK,
1463};
1464static const unsigned int audio_clk_c_a_pins[] = {
1465 /* CLK C */
1466 RCAR_GP_PIN(5, 21),
1467};
1468static const unsigned int audio_clk_c_a_mux[] = {
1469 AUDIO_CLKC_A_MARK,
1470};
1471static const unsigned int audio_clk_c_b_pins[] = {
1472 /* CLK C */
1473 RCAR_GP_PIN(5, 0),
1474};
1475static const unsigned int audio_clk_c_b_mux[] = {
1476 AUDIO_CLKC_B_MARK,
1477};
1478static const unsigned int audio_clkout_a_pins[] = {
1479 /* CLKOUT */
1480 RCAR_GP_PIN(5, 18),
1481};
1482static const unsigned int audio_clkout_a_mux[] = {
1483 AUDIO_CLKOUT_A_MARK,
1484};
1485static const unsigned int audio_clkout_b_pins[] = {
1486 /* CLKOUT */
1487 RCAR_GP_PIN(6, 28),
1488};
1489static const unsigned int audio_clkout_b_mux[] = {
1490 AUDIO_CLKOUT_B_MARK,
1491};
1492static const unsigned int audio_clkout_c_pins[] = {
1493 /* CLKOUT */
1494 RCAR_GP_PIN(5, 3),
1495};
1496static const unsigned int audio_clkout_c_mux[] = {
1497 AUDIO_CLKOUT_C_MARK,
1498};
1499static const unsigned int audio_clkout_d_pins[] = {
1500 /* CLKOUT */
1501 RCAR_GP_PIN(5, 21),
1502};
1503static const unsigned int audio_clkout_d_mux[] = {
1504 AUDIO_CLKOUT_D_MARK,
1505};
1506static const unsigned int audio_clkout1_a_pins[] = {
1507 /* CLKOUT1 */
1508 RCAR_GP_PIN(5, 15),
1509};
1510static const unsigned int audio_clkout1_a_mux[] = {
1511 AUDIO_CLKOUT1_A_MARK,
1512};
1513static const unsigned int audio_clkout1_b_pins[] = {
1514 /* CLKOUT1 */
1515 RCAR_GP_PIN(6, 29),
1516};
1517static const unsigned int audio_clkout1_b_mux[] = {
1518 AUDIO_CLKOUT1_B_MARK,
1519};
1520static const unsigned int audio_clkout2_a_pins[] = {
1521 /* CLKOUT2 */
1522 RCAR_GP_PIN(5, 16),
1523};
1524static const unsigned int audio_clkout2_a_mux[] = {
1525 AUDIO_CLKOUT2_A_MARK,
1526};
1527static const unsigned int audio_clkout2_b_pins[] = {
1528 /* CLKOUT2 */
1529 RCAR_GP_PIN(6, 30),
1530};
1531static const unsigned int audio_clkout2_b_mux[] = {
1532 AUDIO_CLKOUT2_B_MARK,
1533};
1534
1535static const unsigned int audio_clkout3_a_pins[] = {
1536 /* CLKOUT3 */
1537 RCAR_GP_PIN(5, 19),
1538};
1539static const unsigned int audio_clkout3_a_mux[] = {
1540 AUDIO_CLKOUT3_A_MARK,
1541};
1542static const unsigned int audio_clkout3_b_pins[] = {
1543 /* CLKOUT3 */
1544 RCAR_GP_PIN(6, 31),
1545};
1546static const unsigned int audio_clkout3_b_mux[] = {
1547 AUDIO_CLKOUT3_B_MARK,
1548};
1549
1550/* - EtherAVB --------------------------------------------------------------- */
1551static const unsigned int avb_link_pins[] = {
1552 /* AVB_LINK */
1553 RCAR_GP_PIN(2, 12),
1554};
1555static const unsigned int avb_link_mux[] = {
1556 AVB_LINK_MARK,
1557};
1558static const unsigned int avb_magic_pins[] = {
1559	/* AVB_MAGIC */
1560 RCAR_GP_PIN(2, 10),
1561};
1562static const unsigned int avb_magic_mux[] = {
1563 AVB_MAGIC_MARK,
1564};
1565static const unsigned int avb_phy_int_pins[] = {
1566 /* AVB_PHY_INT */
1567 RCAR_GP_PIN(2, 11),
1568};
1569static const unsigned int avb_phy_int_mux[] = {
1570 AVB_PHY_INT_MARK,
1571};
1572static const unsigned int avb_mdc_pins[] = {
1573 /* AVB_MDC */
1574 RCAR_GP_PIN(2, 9),
1575};
1576static const unsigned int avb_mdc_mux[] = {
1577 AVB_MDC_MARK,
1578};
1579static const unsigned int avb_avtp_pps_pins[] = {
1580 /* AVB_AVTP_PPS */
1581 RCAR_GP_PIN(2, 6),
1582};
1583static const unsigned int avb_avtp_pps_mux[] = {
1584 AVB_AVTP_PPS_MARK,
1585};
1586static const unsigned int avb_avtp_match_a_pins[] = {
1587 /* AVB_AVTP_MATCH_A */
1588 RCAR_GP_PIN(2, 13),
1589};
1590static const unsigned int avb_avtp_match_a_mux[] = {
1591 AVB_AVTP_MATCH_A_MARK,
1592};
1593static const unsigned int avb_avtp_capture_a_pins[] = {
1594 /* AVB_AVTP_CAPTURE_A */
1595 RCAR_GP_PIN(2, 14),
1596};
1597static const unsigned int avb_avtp_capture_a_mux[] = {
1598 AVB_AVTP_CAPTURE_A_MARK,
1599};
1600static const unsigned int avb_avtp_match_b_pins[] = {
1601 /* AVB_AVTP_MATCH_B */
1602 RCAR_GP_PIN(1, 8),
1603};
1604static const unsigned int avb_avtp_match_b_mux[] = {
1605 AVB_AVTP_MATCH_B_MARK,
1606};
1607static const unsigned int avb_avtp_capture_b_pins[] = {
1608 /* AVB_AVTP_CAPTURE_B */
1609 RCAR_GP_PIN(1, 11),
1610};
1611static const unsigned int avb_avtp_capture_b_mux[] = {
1612 AVB_AVTP_CAPTURE_B_MARK,
1613};
1614
1615/* - I2C -------------------------------------------------------------------- */
1616static const unsigned int i2c1_a_pins[] = {
1617 /* SDA, SCL */
1618 RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
1619};
1620static const unsigned int i2c1_a_mux[] = {
1621 SDA1_A_MARK, SCL1_A_MARK,
1622};
1623static const unsigned int i2c1_b_pins[] = {
1624 /* SDA, SCL */
1625 RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
1626};
1627static const unsigned int i2c1_b_mux[] = {
1628 SDA1_B_MARK, SCL1_B_MARK,
1629};
1630static const unsigned int i2c2_a_pins[] = {
1631 /* SDA, SCL */
1632 RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
1633};
1634static const unsigned int i2c2_a_mux[] = {
1635 SDA2_A_MARK, SCL2_A_MARK,
1636};
1637static const unsigned int i2c2_b_pins[] = {
1638 /* SDA, SCL */
1639 RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
1640};
1641static const unsigned int i2c2_b_mux[] = {
1642 SDA2_B_MARK, SCL2_B_MARK,
1643};
1644static const unsigned int i2c6_a_pins[] = {
1645 /* SDA, SCL */
1646 RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
1647};
1648static const unsigned int i2c6_a_mux[] = {
1649 SDA6_A_MARK, SCL6_A_MARK,
1650};
1651static const unsigned int i2c6_b_pins[] = {
1652 /* SDA, SCL */
1653 RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
1654};
1655static const unsigned int i2c6_b_mux[] = {
1656 SDA6_B_MARK, SCL6_B_MARK,
1657};
1658static const unsigned int i2c6_c_pins[] = {
1659 /* SDA, SCL */
1660 RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
1661};
1662static const unsigned int i2c6_c_mux[] = {
1663 SDA6_C_MARK, SCL6_C_MARK,
1664};
1665
1666/* - SCIF0 ------------------------------------------------------------------ */
1667static const unsigned int scif0_data_pins[] = {
1668 /* RX, TX */
1669 RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
1670};
1671static const unsigned int scif0_data_mux[] = {
1672 RX0_MARK, TX0_MARK,
1673};
1674static const unsigned int scif0_clk_pins[] = {
1675 /* SCK */
1676 RCAR_GP_PIN(5, 0),
1677};
1678static const unsigned int scif0_clk_mux[] = {
1679 SCK0_MARK,
1680};
1681static const unsigned int scif0_ctrl_pins[] = {
1682 /* RTS, CTS */
1683 RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
1684};
1685static const unsigned int scif0_ctrl_mux[] = {
1686 RTS0_N_TANS_MARK, CTS0_N_MARK,
1687};
1688/* - SCIF1 ------------------------------------------------------------------ */
1689static const unsigned int scif1_data_a_pins[] = {
1690 /* RX, TX */
1691 RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
1692};
1693static const unsigned int scif1_data_a_mux[] = {
1694 RX1_A_MARK, TX1_A_MARK,
1695};
1696static const unsigned int scif1_clk_pins[] = {
1697 /* SCK */
1698 RCAR_GP_PIN(6, 21),
1699};
1700static const unsigned int scif1_clk_mux[] = {
1701 SCK1_MARK,
1702};
1703static const unsigned int scif1_ctrl_pins[] = {
1704 /* RTS, CTS */
1705 RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
1706};
1707static const unsigned int scif1_ctrl_mux[] = {
1708 RTS1_N_TANS_MARK, CTS1_N_MARK,
1709};
1710
1711static const unsigned int scif1_data_b_pins[] = {
1712 /* RX, TX */
1713 RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 25),
1714};
1715static const unsigned int scif1_data_b_mux[] = {
1716 RX1_B_MARK, TX1_B_MARK,
1717};
1718/* - SCIF2 ------------------------------------------------------------------ */
1719static const unsigned int scif2_data_a_pins[] = {
1720 /* RX, TX */
1721 RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
1722};
1723static const unsigned int scif2_data_a_mux[] = {
1724 RX2_A_MARK, TX2_A_MARK,
1725};
1726static const unsigned int scif2_clk_pins[] = {
1727 /* SCK */
1728 RCAR_GP_PIN(5, 9),
1729};
1730static const unsigned int scif2_clk_mux[] = {
1731 SCK2_MARK,
1732};
1733static const unsigned int scif2_data_b_pins[] = {
1734 /* RX, TX */
1735 RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
1736};
1737static const unsigned int scif2_data_b_mux[] = {
1738 RX2_B_MARK, TX2_B_MARK,
1739};
1740/* - SCIF3 ------------------------------------------------------------------ */
1741static const unsigned int scif3_data_a_pins[] = {
1742 /* RX, TX */
1743 RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
1744};
1745static const unsigned int scif3_data_a_mux[] = {
1746 RX3_A_MARK, TX3_A_MARK,
1747};
1748static const unsigned int scif3_clk_pins[] = {
1749 /* SCK */
1750 RCAR_GP_PIN(1, 22),
1751};
1752static const unsigned int scif3_clk_mux[] = {
1753 SCK3_MARK,
1754};
1755static const unsigned int scif3_ctrl_pins[] = {
1756 /* RTS, CTS */
1757 RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
1758};
1759static const unsigned int scif3_ctrl_mux[] = {
1760 RTS3_N_TANS_MARK, CTS3_N_MARK,
1761};
1762static const unsigned int scif3_data_b_pins[] = {
1763 /* RX, TX */
1764 RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
1765};
1766static const unsigned int scif3_data_b_mux[] = {
1767 RX3_B_MARK, TX3_B_MARK,
1768};
1769/* - SCIF4 ------------------------------------------------------------------ */
1770static const unsigned int scif4_data_a_pins[] = {
1771 /* RX, TX */
1772 RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
1773};
1774static const unsigned int scif4_data_a_mux[] = {
1775 RX4_A_MARK, TX4_A_MARK,
1776};
1777static const unsigned int scif4_clk_a_pins[] = {
1778 /* SCK */
1779 RCAR_GP_PIN(2, 10),
1780};
1781static const unsigned int scif4_clk_a_mux[] = {
1782 SCK4_A_MARK,
1783};
1784static const unsigned int scif4_ctrl_a_pins[] = {
1785 /* RTS, CTS */
1786 RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
1787};
1788static const unsigned int scif4_ctrl_a_mux[] = {
1789 RTS4_N_TANS_A_MARK, CTS4_N_A_MARK,
1790};
1791static const unsigned int scif4_data_b_pins[] = {
1792 /* RX, TX */
1793 RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
1794};
1795static const unsigned int scif4_data_b_mux[] = {
1796 RX4_B_MARK, TX4_B_MARK,
1797};
1798static const unsigned int scif4_clk_b_pins[] = {
1799 /* SCK */
1800 RCAR_GP_PIN(1, 5),
1801};
1802static const unsigned int scif4_clk_b_mux[] = {
1803 SCK4_B_MARK,
1804};
1805static const unsigned int scif4_ctrl_b_pins[] = {
1806 /* RTS, CTS */
1807 RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
1808};
1809static const unsigned int scif4_ctrl_b_mux[] = {
1810 RTS4_N_TANS_B_MARK, CTS4_N_B_MARK,
1811};
1812static const unsigned int scif4_data_c_pins[] = {
1813 /* RX, TX */
1814 RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
1815};
1816static const unsigned int scif4_data_c_mux[] = {
1817 RX4_C_MARK, TX4_C_MARK,
1818};
1819static const unsigned int scif4_clk_c_pins[] = {
1820 /* SCK */
1821 RCAR_GP_PIN(0, 8),
1822};
1823static const unsigned int scif4_clk_c_mux[] = {
1824 SCK4_C_MARK,
1825};
1826static const unsigned int scif4_ctrl_c_pins[] = {
1827 /* RTS, CTS */
1828 RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
1829};
1830static const unsigned int scif4_ctrl_c_mux[] = {
1831 RTS4_N_TANS_C_MARK, CTS4_N_C_MARK,
1832};
1833/* - SCIF5 ------------------------------------------------------------------ */
1834static const unsigned int scif5_data_pins[] = {
1835 /* RX, TX */
1836 RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
1837};
1838static const unsigned int scif5_data_mux[] = {
1839 RX5_MARK, TX5_MARK,
1840};
1841static const unsigned int scif5_clk_pins[] = {
1842 /* SCK */
1843 RCAR_GP_PIN(6, 21),
1844};
1845static const unsigned int scif5_clk_mux[] = {
1846 SCK5_MARK,
1847};
1848
1849/* - SSI -------------------------------------------------------------------- */
1850static const unsigned int ssi0_data_pins[] = {
1851 /* SDATA */
1852 RCAR_GP_PIN(6, 2),
1853};
1854static const unsigned int ssi0_data_mux[] = {
1855 SSI_SDATA0_MARK,
1856};
1857static const unsigned int ssi01239_ctrl_pins[] = {
1858 /* SCK, WS */
1859 RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
1860};
1861static const unsigned int ssi01239_ctrl_mux[] = {
1862 SSI_SCK0129_MARK, SSI_WS0129_MARK,
1863};
1864static const unsigned int ssi1_data_a_pins[] = {
1865 /* SDATA */
1866 RCAR_GP_PIN(6, 3),
1867};
1868static const unsigned int ssi1_data_a_mux[] = {
1869 SSI_SDATA1_A_MARK,
1870};
1871static const unsigned int ssi1_data_b_pins[] = {
1872 /* SDATA */
1873 RCAR_GP_PIN(5, 12),
1874};
1875static const unsigned int ssi1_data_b_mux[] = {
1876 SSI_SDATA1_B_MARK,
1877};
1878static const unsigned int ssi1_ctrl_a_pins[] = {
1879 /* SCK, WS */
1880 RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
1881};
1882static const unsigned int ssi1_ctrl_a_mux[] = {
1883 SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
1884};
1885static const unsigned int ssi1_ctrl_b_pins[] = {
1886 /* SCK, WS */
1887 RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
1888};
1889static const unsigned int ssi1_ctrl_b_mux[] = {
1890 SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
1891};
1892static const unsigned int ssi2_data_a_pins[] = {
1893 /* SDATA */
1894 RCAR_GP_PIN(6, 4),
1895};
1896static const unsigned int ssi2_data_a_mux[] = {
1897 SSI_SDATA2_A_MARK,
1898};
1899static const unsigned int ssi2_data_b_pins[] = {
1900 /* SDATA */
1901 RCAR_GP_PIN(5, 13),
1902};
1903static const unsigned int ssi2_data_b_mux[] = {
1904 SSI_SDATA2_B_MARK,
1905};
1906static const unsigned int ssi2_ctrl_a_pins[] = {
1907 /* SCK, WS */
1908 RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
1909};
1910static const unsigned int ssi2_ctrl_a_mux[] = {
1911 SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
1912};
1913static const unsigned int ssi2_ctrl_b_pins[] = {
1914 /* SCK, WS */
1915 RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
1916};
1917static const unsigned int ssi2_ctrl_b_mux[] = {
1918 SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
1919};
1920static const unsigned int ssi3_data_pins[] = {
1921 /* SDATA */
1922 RCAR_GP_PIN(6, 7),
1923};
1924static const unsigned int ssi3_data_mux[] = {
1925 SSI_SDATA3_MARK,
1926};
1927static const unsigned int ssi34_ctrl_pins[] = {
1928 /* SCK, WS */
1929 RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
1930};
1931static const unsigned int ssi34_ctrl_mux[] = {
1932 SSI_SCK34_MARK, SSI_WS34_MARK,
1933};
1934static const unsigned int ssi4_data_pins[] = {
1935 /* SDATA */
1936 RCAR_GP_PIN(6, 10),
1937};
1938static const unsigned int ssi4_data_mux[] = {
1939 SSI_SDATA4_MARK,
1940};
1941static const unsigned int ssi4_ctrl_pins[] = {
1942 /* SCK, WS */
1943 RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
1944};
1945static const unsigned int ssi4_ctrl_mux[] = {
1946 SSI_SCK4_MARK, SSI_WS4_MARK,
1947};
1948static const unsigned int ssi5_data_pins[] = {
1949 /* SDATA */
1950 RCAR_GP_PIN(6, 13),
1951};
1952static const unsigned int ssi5_data_mux[] = {
1953 SSI_SDATA5_MARK,
1954};
1955static const unsigned int ssi5_ctrl_pins[] = {
1956 /* SCK, WS */
1957 RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
1958};
1959static const unsigned int ssi5_ctrl_mux[] = {
1960 SSI_SCK5_MARK, SSI_WS5_MARK,
1961};
1962static const unsigned int ssi6_data_pins[] = {
1963 /* SDATA */
1964 RCAR_GP_PIN(6, 16),
1965};
1966static const unsigned int ssi6_data_mux[] = {
1967 SSI_SDATA6_MARK,
1968};
1969static const unsigned int ssi6_ctrl_pins[] = {
1970 /* SCK, WS */
1971 RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
1972};
1973static const unsigned int ssi6_ctrl_mux[] = {
1974 SSI_SCK6_MARK, SSI_WS6_MARK,
1975};
1976static const unsigned int ssi7_data_pins[] = {
1977 /* SDATA */
1978 RCAR_GP_PIN(6, 19),
1979};
1980static const unsigned int ssi7_data_mux[] = {
1981 SSI_SDATA7_MARK,
1982};
1983static const unsigned int ssi78_ctrl_pins[] = {
1984 /* SCK, WS */
1985 RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
1986};
1987static const unsigned int ssi78_ctrl_mux[] = {
1988 SSI_SCK78_MARK, SSI_WS78_MARK,
1989};
1990static const unsigned int ssi8_data_pins[] = {
1991 /* SDATA */
1992 RCAR_GP_PIN(6, 20),
1993};
1994static const unsigned int ssi8_data_mux[] = {
1995 SSI_SDATA8_MARK,
1996};
1997static const unsigned int ssi9_data_a_pins[] = {
1998 /* SDATA */
1999 RCAR_GP_PIN(6, 21),
2000};
2001static const unsigned int ssi9_data_a_mux[] = {
2002 SSI_SDATA9_A_MARK,
2003};
2004static const unsigned int ssi9_data_b_pins[] = {
2005 /* SDATA */
2006 RCAR_GP_PIN(5, 14),
2007};
2008static const unsigned int ssi9_data_b_mux[] = {
2009 SSI_SDATA9_B_MARK,
2010};
2011static const unsigned int ssi9_ctrl_a_pins[] = {
2012 /* SCK, WS */
2013 RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
2014};
2015static const unsigned int ssi9_ctrl_a_mux[] = {
2016 SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
2017};
2018static const unsigned int ssi9_ctrl_b_pins[] = {
2019 /* SCK, WS */
2020 RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
2021};
2022static const unsigned int ssi9_ctrl_b_mux[] = {
2023 SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
2024};
2025
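/*
 * Each SH_PFC_PIN_GROUP(name) entry pairs the name_pins[] and name_mux[]
 * arrays defined above into a selectable pin group.
 */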
2026static const struct sh_pfc_pin_group pinmux_groups[] = {
2027 SH_PFC_PIN_GROUP(audio_clk_a_a),
2028 SH_PFC_PIN_GROUP(audio_clk_a_b),
2029 SH_PFC_PIN_GROUP(audio_clk_a_c),
2030 SH_PFC_PIN_GROUP(audio_clk_b_a),
2031 SH_PFC_PIN_GROUP(audio_clk_b_b),
2032 SH_PFC_PIN_GROUP(audio_clk_c_a),
2033 SH_PFC_PIN_GROUP(audio_clk_c_b),
2034 SH_PFC_PIN_GROUP(audio_clkout_a),
2035 SH_PFC_PIN_GROUP(audio_clkout_b),
2036 SH_PFC_PIN_GROUP(audio_clkout_c),
2037 SH_PFC_PIN_GROUP(audio_clkout_d),
2038 SH_PFC_PIN_GROUP(audio_clkout1_a),
2039 SH_PFC_PIN_GROUP(audio_clkout1_b),
2040 SH_PFC_PIN_GROUP(audio_clkout2_a),
2041 SH_PFC_PIN_GROUP(audio_clkout2_b),
2042 SH_PFC_PIN_GROUP(audio_clkout3_a),
2043 SH_PFC_PIN_GROUP(audio_clkout3_b),
2044 SH_PFC_PIN_GROUP(avb_link),
2045 SH_PFC_PIN_GROUP(avb_magic),
2046 SH_PFC_PIN_GROUP(avb_phy_int),
2047 SH_PFC_PIN_GROUP(avb_mdc),
2048 SH_PFC_PIN_GROUP(avb_avtp_pps),
2049 SH_PFC_PIN_GROUP(avb_avtp_match_a),
2050 SH_PFC_PIN_GROUP(avb_avtp_capture_a),
2051 SH_PFC_PIN_GROUP(avb_avtp_match_b),
2052 SH_PFC_PIN_GROUP(avb_avtp_capture_b),
2053 SH_PFC_PIN_GROUP(i2c1_a),
2054 SH_PFC_PIN_GROUP(i2c1_b),
2055 SH_PFC_PIN_GROUP(i2c2_a),
2056 SH_PFC_PIN_GROUP(i2c2_b),
2057 SH_PFC_PIN_GROUP(i2c6_a),
2058 SH_PFC_PIN_GROUP(i2c6_b),
2059 SH_PFC_PIN_GROUP(i2c6_c),
2060 SH_PFC_PIN_GROUP(scif0_data),
2061 SH_PFC_PIN_GROUP(scif0_clk),
2062 SH_PFC_PIN_GROUP(scif0_ctrl),
2063 SH_PFC_PIN_GROUP(scif1_data_a),
2064 SH_PFC_PIN_GROUP(scif1_clk),
2065 SH_PFC_PIN_GROUP(scif1_ctrl),
2066 SH_PFC_PIN_GROUP(scif1_data_b),
2067 SH_PFC_PIN_GROUP(scif2_data_a),
2068 SH_PFC_PIN_GROUP(scif2_clk),
2069 SH_PFC_PIN_GROUP(scif2_data_b),
2070 SH_PFC_PIN_GROUP(scif3_data_a),
2071 SH_PFC_PIN_GROUP(scif3_clk),
2072 SH_PFC_PIN_GROUP(scif3_ctrl),
2073 SH_PFC_PIN_GROUP(scif3_data_b),
2074 SH_PFC_PIN_GROUP(scif4_data_a),
2075 SH_PFC_PIN_GROUP(scif4_clk_a),
2076 SH_PFC_PIN_GROUP(scif4_ctrl_a),
2077 SH_PFC_PIN_GROUP(scif4_data_b),
2078 SH_PFC_PIN_GROUP(scif4_clk_b),
2079 SH_PFC_PIN_GROUP(scif4_ctrl_b),
2080 SH_PFC_PIN_GROUP(scif4_data_c),
2081 SH_PFC_PIN_GROUP(scif4_clk_c),
2082 SH_PFC_PIN_GROUP(scif4_ctrl_c),
2083 SH_PFC_PIN_GROUP(scif5_data),
2084 SH_PFC_PIN_GROUP(scif5_clk),
2085 SH_PFC_PIN_GROUP(ssi0_data),
2086 SH_PFC_PIN_GROUP(ssi01239_ctrl),
2087 SH_PFC_PIN_GROUP(ssi1_data_a),
2088 SH_PFC_PIN_GROUP(ssi1_data_b),
2089 SH_PFC_PIN_GROUP(ssi1_ctrl_a),
2090 SH_PFC_PIN_GROUP(ssi1_ctrl_b),
2091 SH_PFC_PIN_GROUP(ssi2_data_a),
2092 SH_PFC_PIN_GROUP(ssi2_data_b),
2093 SH_PFC_PIN_GROUP(ssi2_ctrl_a),
2094 SH_PFC_PIN_GROUP(ssi2_ctrl_b),
2095 SH_PFC_PIN_GROUP(ssi3_data),
2096 SH_PFC_PIN_GROUP(ssi34_ctrl),
2097 SH_PFC_PIN_GROUP(ssi4_data),
2098 SH_PFC_PIN_GROUP(ssi4_ctrl),
2099 SH_PFC_PIN_GROUP(ssi5_data),
2100 SH_PFC_PIN_GROUP(ssi5_ctrl),
2101 SH_PFC_PIN_GROUP(ssi6_data),
2102 SH_PFC_PIN_GROUP(ssi6_ctrl),
2103 SH_PFC_PIN_GROUP(ssi7_data),
2104 SH_PFC_PIN_GROUP(ssi78_ctrl),
2105 SH_PFC_PIN_GROUP(ssi8_data),
2106 SH_PFC_PIN_GROUP(ssi9_data_a),
2107 SH_PFC_PIN_GROUP(ssi9_data_b),
2108 SH_PFC_PIN_GROUP(ssi9_ctrl_a),
2109 SH_PFC_PIN_GROUP(ssi9_ctrl_b),
2110};
2111
2112static const char * const audio_clk_groups[] = {
2113 "audio_clk_a_a",
2114 "audio_clk_a_b",
2115 "audio_clk_a_c",
2116 "audio_clk_b_a",
2117 "audio_clk_b_b",
2118 "audio_clk_c_a",
2119 "audio_clk_c_b",
2120 "audio_clkout_a",
2121 "audio_clkout_b",
2122 "audio_clkout_c",
2123 "audio_clkout_d",
2124 "audio_clkout1_a",
2125 "audio_clkout1_b",
2126 "audio_clkout2_a",
2127 "audio_clkout2_b",
2128 "audio_clkout3_a",
2129 "audio_clkout3_b",
2130};
2131
2132static const char * const avb_groups[] = {
2133 "avb_link",
2134 "avb_magic",
2135 "avb_phy_int",
2136 "avb_mdc",
2137 "avb_avtp_pps",
2138 "avb_avtp_match_a",
2139 "avb_avtp_capture_a",
2140 "avb_avtp_match_b",
2141 "avb_avtp_capture_b",
2142};
2143
2144static const char * const i2c1_groups[] = {
2145 "i2c1_a",
2146 "i2c1_b",
2147};
2148
2149static const char * const i2c2_groups[] = {
2150 "i2c2_a",
2151 "i2c2_b",
2152};
2153
2154static const char * const i2c6_groups[] = {
2155 "i2c6_a",
2156 "i2c6_b",
2157 "i2c6_c",
2158};
2159
2160static const char * const scif0_groups[] = {
2161 "scif0_data",
2162 "scif0_clk",
2163 "scif0_ctrl",
2164};
2165
2166static const char * const scif1_groups[] = {
2167 "scif1_data_a",
2168 "scif1_clk",
2169 "scif1_ctrl",
2170 "scif1_data_b",
2171};
2172
2173static const char * const scif2_groups[] = {
2174 "scif2_data_a",
2175 "scif2_clk",
2176 "scif2_data_b",
2177};
2178
2179static const char * const scif3_groups[] = {
2180 "scif3_data_a",
2181 "scif3_clk",
2182 "scif3_ctrl",
2183 "scif3_data_b",
2184};
2185
2186static const char * const scif4_groups[] = {
2187 "scif4_data_a",
2188 "scif4_clk_a",
2189 "scif4_ctrl_a",
2190 "scif4_data_b",
2191 "scif4_clk_b",
2192 "scif4_ctrl_b",
2193 "scif4_data_c",
2194 "scif4_clk_c",
2195 "scif4_ctrl_c",
2196};
2197
2198static const char * const scif5_groups[] = {
2199 "scif5_data",
2200 "scif5_clk",
2201};
2202
2203static const char * const ssi_groups[] = {
2204 "ssi0_data",
2205 "ssi01239_ctrl",
2206 "ssi1_data_a",
2207 "ssi1_data_b",
2208 "ssi1_ctrl_a",
2209 "ssi1_ctrl_b",
2210 "ssi2_data_a",
2211 "ssi2_data_b",
2212 "ssi2_ctrl_a",
2213 "ssi2_ctrl_b",
2214 "ssi3_data",
2215 "ssi34_ctrl",
2216 "ssi4_data",
2217 "ssi4_ctrl",
2218 "ssi5_data",
2219 "ssi5_ctrl",
2220 "ssi6_data",
2221 "ssi6_ctrl",
2222 "ssi7_data",
2223 "ssi78_ctrl",
2224 "ssi8_data",
2225 "ssi9_data_a",
2226 "ssi9_data_b",
2227 "ssi9_ctrl_a",
2228 "ssi9_ctrl_b",
2229};
2230
2231static const struct sh_pfc_function pinmux_functions[] = {
2232 SH_PFC_FUNCTION(audio_clk),
2233 SH_PFC_FUNCTION(avb),
2234 SH_PFC_FUNCTION(i2c1),
2235 SH_PFC_FUNCTION(i2c2),
2236 SH_PFC_FUNCTION(i2c6),
2237 SH_PFC_FUNCTION(scif0),
2238 SH_PFC_FUNCTION(scif1),
2239 SH_PFC_FUNCTION(scif2),
2240 SH_PFC_FUNCTION(scif3),
2241 SH_PFC_FUNCTION(scif4),
2242 SH_PFC_FUNCTION(scif5),
2243 SH_PFC_FUNCTION(ssi),
2244};
2245
2246static const struct pinmux_cfg_reg pinmux_config_regs[] = {
2247#define F_(x, y) FN_##y
2248#define FM(x) FN_##x
2249 { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
2250 0, 0,
2251 0, 0,
2252 0, 0,
2253 0, 0,
2254 0, 0,
2255 0, 0,
2256 0, 0,
2257 0, 0,
2258 0, 0,
2259 0, 0,
2260 0, 0,
2261 0, 0,
2262 0, 0,
2263 0, 0,
2264 0, 0,
2265 0, 0,
2266 GP_0_15_FN, GPSR0_15,
2267 GP_0_14_FN, GPSR0_14,
2268 GP_0_13_FN, GPSR0_13,
2269 GP_0_12_FN, GPSR0_12,
2270 GP_0_11_FN, GPSR0_11,
2271 GP_0_10_FN, GPSR0_10,
2272 GP_0_9_FN, GPSR0_9,
2273 GP_0_8_FN, GPSR0_8,
2274 GP_0_7_FN, GPSR0_7,
2275 GP_0_6_FN, GPSR0_6,
2276 GP_0_5_FN, GPSR0_5,
2277 GP_0_4_FN, GPSR0_4,
2278 GP_0_3_FN, GPSR0_3,
2279 GP_0_2_FN, GPSR0_2,
2280 GP_0_1_FN, GPSR0_1,
2281 GP_0_0_FN, GPSR0_0, }
2282 },
2283 { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
2284 0, 0,
2285 0, 0,
2286 0, 0,
2287 0, 0,
2288 GP_1_27_FN, GPSR1_27,
2289 GP_1_26_FN, GPSR1_26,
2290 GP_1_25_FN, GPSR1_25,
2291 GP_1_24_FN, GPSR1_24,
2292 GP_1_23_FN, GPSR1_23,
2293 GP_1_22_FN, GPSR1_22,
2294 GP_1_21_FN, GPSR1_21,
2295 GP_1_20_FN, GPSR1_20,
2296 GP_1_19_FN, GPSR1_19,
2297 GP_1_18_FN, GPSR1_18,
2298 GP_1_17_FN, GPSR1_17,
2299 GP_1_16_FN, GPSR1_16,
2300 GP_1_15_FN, GPSR1_15,
2301 GP_1_14_FN, GPSR1_14,
2302 GP_1_13_FN, GPSR1_13,
2303 GP_1_12_FN, GPSR1_12,
2304 GP_1_11_FN, GPSR1_11,
2305 GP_1_10_FN, GPSR1_10,
2306 GP_1_9_FN, GPSR1_9,
2307 GP_1_8_FN, GPSR1_8,
2308 GP_1_7_FN, GPSR1_7,
2309 GP_1_6_FN, GPSR1_6,
2310 GP_1_5_FN, GPSR1_5,
2311 GP_1_4_FN, GPSR1_4,
2312 GP_1_3_FN, GPSR1_3,
2313 GP_1_2_FN, GPSR1_2,
2314 GP_1_1_FN, GPSR1_1,
2315 GP_1_0_FN, GPSR1_0, }
2316 },
2317 { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
2318 0, 0,
2319 0, 0,
2320 0, 0,
2321 0, 0,
2322 0, 0,
2323 0, 0,
2324 0, 0,
2325 0, 0,
2326 0, 0,
2327 0, 0,
2328 0, 0,
2329 0, 0,
2330 0, 0,
2331 0, 0,
2332 0, 0,
2333 0, 0,
2334 0, 0,
2335 GP_2_14_FN, GPSR2_14,
2336 GP_2_13_FN, GPSR2_13,
2337 GP_2_12_FN, GPSR2_12,
2338 GP_2_11_FN, GPSR2_11,
2339 GP_2_10_FN, GPSR2_10,
2340 GP_2_9_FN, GPSR2_9,
2341 GP_2_8_FN, GPSR2_8,
2342 GP_2_7_FN, GPSR2_7,
2343 GP_2_6_FN, GPSR2_6,
2344 GP_2_5_FN, GPSR2_5,
2345 GP_2_4_FN, GPSR2_4,
2346 GP_2_3_FN, GPSR2_3,
2347 GP_2_2_FN, GPSR2_2,
2348 GP_2_1_FN, GPSR2_1,
2349 GP_2_0_FN, GPSR2_0, }
2350 },
2351 { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
2352 0, 0,
2353 0, 0,
2354 0, 0,
2355 0, 0,
2356 0, 0,
2357 0, 0,
2358 0, 0,
2359 0, 0,
2360 0, 0,
2361 0, 0,
2362 0, 0,
2363 0, 0,
2364 0, 0,
2365 0, 0,
2366 0, 0,
2367 0, 0,
2368 GP_3_15_FN, GPSR3_15,
2369 GP_3_14_FN, GPSR3_14,
2370 GP_3_13_FN, GPSR3_13,
2371 GP_3_12_FN, GPSR3_12,
2372 GP_3_11_FN, GPSR3_11,
2373 GP_3_10_FN, GPSR3_10,
2374 GP_3_9_FN, GPSR3_9,
2375 GP_3_8_FN, GPSR3_8,
2376 GP_3_7_FN, GPSR3_7,
2377 GP_3_6_FN, GPSR3_6,
2378 GP_3_5_FN, GPSR3_5,
2379 GP_3_4_FN, GPSR3_4,
2380 GP_3_3_FN, GPSR3_3,
2381 GP_3_2_FN, GPSR3_2,
2382 GP_3_1_FN, GPSR3_1,
2383 GP_3_0_FN, GPSR3_0, }
2384 },
2385 { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
2386 0, 0,
2387 0, 0,
2388 0, 0,
2389 0, 0,
2390 0, 0,
2391 0, 0,
2392 0, 0,
2393 0, 0,
2394 0, 0,
2395 0, 0,
2396 0, 0,
2397 0, 0,
2398 0, 0,
2399 0, 0,
2400 GP_4_17_FN, GPSR4_17,
2401 GP_4_16_FN, GPSR4_16,
2402 GP_4_15_FN, GPSR4_15,
2403 GP_4_14_FN, GPSR4_14,
2404 GP_4_13_FN, GPSR4_13,
2405 GP_4_12_FN, GPSR4_12,
2406 GP_4_11_FN, GPSR4_11,
2407 GP_4_10_FN, GPSR4_10,
2408 GP_4_9_FN, GPSR4_9,
2409 GP_4_8_FN, GPSR4_8,
2410 GP_4_7_FN, GPSR4_7,
2411 GP_4_6_FN, GPSR4_6,
2412 GP_4_5_FN, GPSR4_5,
2413 GP_4_4_FN, GPSR4_4,
2414 GP_4_3_FN, GPSR4_3,
2415 GP_4_2_FN, GPSR4_2,
2416 GP_4_1_FN, GPSR4_1,
2417 GP_4_0_FN, GPSR4_0, }
2418 },
2419 { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
2420 0, 0,
2421 0, 0,
2422 0, 0,
2423 0, 0,
2424 0, 0,
2425 0, 0,
2426 GP_5_25_FN, GPSR5_25,
2427 GP_5_24_FN, GPSR5_24,
2428 GP_5_23_FN, GPSR5_23,
2429 GP_5_22_FN, GPSR5_22,
2430 GP_5_21_FN, GPSR5_21,
2431 GP_5_20_FN, GPSR5_20,
2432 GP_5_19_FN, GPSR5_19,
2433 GP_5_18_FN, GPSR5_18,
2434 GP_5_17_FN, GPSR5_17,
2435 GP_5_16_FN, GPSR5_16,
2436 GP_5_15_FN, GPSR5_15,
2437 GP_5_14_FN, GPSR5_14,
2438 GP_5_13_FN, GPSR5_13,
2439 GP_5_12_FN, GPSR5_12,
2440 GP_5_11_FN, GPSR5_11,
2441 GP_5_10_FN, GPSR5_10,
2442 GP_5_9_FN, GPSR5_9,
2443 GP_5_8_FN, GPSR5_8,
2444 GP_5_7_FN, GPSR5_7,
2445 GP_5_6_FN, GPSR5_6,
2446 GP_5_5_FN, GPSR5_5,
2447 GP_5_4_FN, GPSR5_4,
2448 GP_5_3_FN, GPSR5_3,
2449 GP_5_2_FN, GPSR5_2,
2450 GP_5_1_FN, GPSR5_1,
2451 GP_5_0_FN, GPSR5_0, }
2452 },
2453 { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
2454 GP_6_31_FN, GPSR6_31,
2455 GP_6_30_FN, GPSR6_30,
2456 GP_6_29_FN, GPSR6_29,
2457 GP_6_28_FN, GPSR6_28,
2458 GP_6_27_FN, GPSR6_27,
2459 GP_6_26_FN, GPSR6_26,
2460 GP_6_25_FN, GPSR6_25,
2461 GP_6_24_FN, GPSR6_24,
2462 GP_6_23_FN, GPSR6_23,
2463 GP_6_22_FN, GPSR6_22,
2464 GP_6_21_FN, GPSR6_21,
2465 GP_6_20_FN, GPSR6_20,
2466 GP_6_19_FN, GPSR6_19,
2467 GP_6_18_FN, GPSR6_18,
2468 GP_6_17_FN, GPSR6_17,
2469 GP_6_16_FN, GPSR6_16,
2470 GP_6_15_FN, GPSR6_15,
2471 GP_6_14_FN, GPSR6_14,
2472 GP_6_13_FN, GPSR6_13,
2473 GP_6_12_FN, GPSR6_12,
2474 GP_6_11_FN, GPSR6_11,
2475 GP_6_10_FN, GPSR6_10,
2476 GP_6_9_FN, GPSR6_9,
2477 GP_6_8_FN, GPSR6_8,
2478 GP_6_7_FN, GPSR6_7,
2479 GP_6_6_FN, GPSR6_6,
2480 GP_6_5_FN, GPSR6_5,
2481 GP_6_4_FN, GPSR6_4,
2482 GP_6_3_FN, GPSR6_3,
2483 GP_6_2_FN, GPSR6_2,
2484 GP_6_1_FN, GPSR6_1,
2485 GP_6_0_FN, GPSR6_0, }
2486 },
2487 { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1) {
2488 0, 0,
2489 0, 0,
2490 0, 0,
2491 0, 0,
2492 0, 0,
2493 0, 0,
2494 0, 0,
2495 0, 0,
2496 0, 0,
2497 0, 0,
2498 0, 0,
2499 0, 0,
2500 0, 0,
2501 0, 0,
2502 0, 0,
2503 0, 0,
2504 0, 0,
2505 0, 0,
2506 0, 0,
2507 0, 0,
2508 0, 0,
2509 0, 0,
2510 0, 0,
2511 0, 0,
2512 0, 0,
2513 0, 0,
2514 0, 0,
2515 0, 0,
2516 GP_7_3_FN, GPSR7_3,
2517 GP_7_2_FN, GPSR7_2,
2518 GP_7_1_FN, GPSR7_1,
2519 GP_7_0_FN, GPSR7_0, }
2520 },
2521#undef F_
2522#undef FM
2523
2524#define F_(x, y) x,
2525#define FM(x) FN_##x,
2526 { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
2527 IP0_31_28
2528 IP0_27_24
2529 IP0_23_20
2530 IP0_19_16
2531 IP0_15_12
2532 IP0_11_8
2533 IP0_7_4
2534 IP0_3_0 }
2535 },
2536 { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
2537 IP1_31_28
2538 IP1_27_24
2539 IP1_23_20
2540 IP1_19_16
2541 IP1_15_12
2542 IP1_11_8
2543 IP1_7_4
2544 IP1_3_0 }
2545 },
2546 { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
2547 IP2_31_28
2548 IP2_27_24
2549 IP2_23_20
2550 IP2_19_16
2551 IP2_15_12
2552 IP2_11_8
2553 IP2_7_4
2554 IP2_3_0 }
2555 },
2556 { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
2557 IP3_31_28
2558 IP3_27_24
2559 IP3_23_20
2560 IP3_19_16
2561 IP3_15_12
2562 IP3_11_8
2563 IP3_7_4
2564 IP3_3_0 }
2565 },
2566 { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
2567 IP4_31_28
2568 IP4_27_24
2569 IP4_23_20
2570 IP4_19_16
2571 IP4_15_12
2572 IP4_11_8
2573 IP4_7_4
2574 IP4_3_0 }
2575 },
2576 { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
2577 IP5_31_28
2578 IP5_27_24
2579 IP5_23_20
2580 IP5_19_16
2581 IP5_15_12
2582 IP5_11_8
2583 IP5_7_4
2584 IP5_3_0 }
2585 },
2586 { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
2587 IP6_31_28
2588 IP6_27_24
2589 IP6_23_20
2590 IP6_19_16
2591 IP6_15_12
2592 IP6_11_8
2593 IP6_7_4
2594 IP6_3_0 }
2595 },
2596 { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
2597 IP7_31_28
2598 IP7_27_24
2599 IP7_23_20
2600 IP7_19_16
2601 IP7_15_12
2602 IP7_11_8
2603 IP7_7_4
2604 IP7_3_0 }
2605 },
2606 { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
2607 IP8_31_28
2608 IP8_27_24
2609 IP8_23_20
2610 IP8_19_16
2611 IP8_15_12
2612 IP8_11_8
2613 IP8_7_4
2614 IP8_3_0 }
2615 },
2616 { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
2617 IP9_31_28
2618 IP9_27_24
2619 IP9_23_20
2620 IP9_19_16
2621 IP9_15_12
2622 IP9_11_8
2623 IP9_7_4
2624 IP9_3_0 }
2625 },
2626 { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
2627 IP10_31_28
2628 IP10_27_24
2629 IP10_23_20
2630 IP10_19_16
2631 IP10_15_12
2632 IP10_11_8
2633 IP10_7_4
2634 IP10_3_0 }
2635 },
2636 { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
2637 IP11_31_28
2638 IP11_27_24
2639 IP11_23_20
2640 IP11_19_16
2641 IP11_15_12
2642 IP11_11_8
2643 IP11_7_4
2644 IP11_3_0 }
2645 },
2646 { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
2647 IP12_31_28
2648 IP12_27_24
2649 IP12_23_20
2650 IP12_19_16
2651 IP12_15_12
2652 IP12_11_8
2653 IP12_7_4
2654 IP12_3_0 }
2655 },
2656 { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
2657 IP13_31_28
2658 IP13_27_24
2659 IP13_23_20
2660 IP13_19_16
2661 IP13_15_12
2662 IP13_11_8
2663 IP13_7_4
2664 IP13_3_0 }
2665 },
2666 { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
2667 IP14_31_28
2668 IP14_27_24
2669 IP14_23_20
2670 IP14_19_16
2671 IP14_15_12
2672 IP14_11_8
2673 IP14_7_4
2674 IP14_3_0 }
2675 },
2676 { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
2677 IP15_31_28
2678 IP15_27_24
2679 IP15_23_20
2680 IP15_19_16
2681 IP15_15_12
2682 IP15_11_8
2683 IP15_7_4
2684 IP15_3_0 }
2685 },
2686 { PINMUX_CFG_REG("IPSR16", 0xe6060240, 32, 4) {
2687 IP16_31_28
2688 IP16_27_24
2689 IP16_23_20
2690 IP16_19_16
2691 IP16_15_12
2692 IP16_11_8
2693 IP16_7_4
2694 IP16_3_0 }
2695 },
2696 { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4) {
2697 /* IP17_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2698 /* IP17_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2699 /* IP17_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2700 /* IP17_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2701 /* IP17_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2702 /* IP17_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2703 IP17_7_4
2704 IP17_3_0 }
2705 },
2706#undef F_
2707#undef FM
2708
2709#define F_(x, y) x,
2710#define FM(x) FN_##x,
2711 { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
2712 1, 2, 2, 3, 1, 1, 2, 1, 1, 1,
2713 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1) {
2714 0, 0, /* RESERVED 31 */
2715 MOD_SEL0_30_29
2716 MOD_SEL0_28_27
2717 MOD_SEL0_26_25_24
2718 MOD_SEL0_23
2719 MOD_SEL0_22
2720 MOD_SEL0_21_20
2721 MOD_SEL0_19
2722 MOD_SEL0_18
2723 MOD_SEL0_17
2724 MOD_SEL0_16_15
2725 MOD_SEL0_14
2726 MOD_SEL0_13
2727 MOD_SEL0_12
2728 MOD_SEL0_11
2729 MOD_SEL0_10
2730 MOD_SEL0_9
2731 MOD_SEL0_8
2732 MOD_SEL0_7_6
2733 MOD_SEL0_5_4
2734 MOD_SEL0_3
2735 MOD_SEL0_2_1
2736 0, 0, /* RESERVED 0 */ }
2737 },
2738 { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
2739 2, 3, 1, 2, 3, 1, 1, 2, 1,
2740 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1) {
2741 MOD_SEL1_31_30
2742 MOD_SEL1_29_28_27
2743 MOD_SEL1_26
2744 MOD_SEL1_25_24
2745 MOD_SEL1_23_22_21
2746 MOD_SEL1_20
2747 MOD_SEL1_19
2748 MOD_SEL1_18_17
2749 MOD_SEL1_16
2750 MOD_SEL1_15_14
2751 MOD_SEL1_13
2752 MOD_SEL1_12
2753 MOD_SEL1_11
2754 MOD_SEL1_10
2755 MOD_SEL1_9
2756 0, 0, 0, 0, /* RESERVED 8, 7 */
2757 MOD_SEL1_6
2758 MOD_SEL1_5
2759 MOD_SEL1_4
2760 MOD_SEL1_3
2761 MOD_SEL1_2
2762 MOD_SEL1_1
2763 MOD_SEL1_0 }
2764 },
2765 { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32,
2766 1, 1, 1, 1, 4, 4, 4,
2767 4, 4, 4, 1, 2, 1) {
2768 MOD_SEL2_31
2769 MOD_SEL2_30
2770 MOD_SEL2_29
2771 /* RESERVED 28 */
2772 0, 0,
2773 /* RESERVED 27, 26, 25, 24 */
2774 0, 0, 0, 0, 0, 0, 0, 0,
2775 0, 0, 0, 0, 0, 0, 0, 0,
2776 /* RESERVED 23, 22, 21, 20 */
2777 0, 0, 0, 0, 0, 0, 0, 0,
2778 0, 0, 0, 0, 0, 0, 0, 0,
2779 /* RESERVED 19, 18, 17, 16 */
2780 0, 0, 0, 0, 0, 0, 0, 0,
2781 0, 0, 0, 0, 0, 0, 0, 0,
2782 /* RESERVED 15, 14, 13, 12 */
2783 0, 0, 0, 0, 0, 0, 0, 0,
2784 0, 0, 0, 0, 0, 0, 0, 0,
2785 /* RESERVED 11, 10, 9, 8 */
2786 0, 0, 0, 0, 0, 0, 0, 0,
2787 0, 0, 0, 0, 0, 0, 0, 0,
2788 /* RESERVED 7, 6, 5, 4 */
2789 0, 0, 0, 0, 0, 0, 0, 0,
2790 0, 0, 0, 0, 0, 0, 0, 0,
2791 /* RESERVED 3 */
2792 0, 0,
2793 MOD_SEL2_2_1
2794 MOD_SEL2_0 }
2795 },
2796 { },
2797};
2798
2799const struct sh_pfc_soc_info r8a7795_pinmux_info = {
2800 .name = "r8a77950_pfc",
2801 .unlock_reg = 0xe6060000, /* PMMR */
2802
2803 .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
2804
2805 .pins = pinmux_pins,
2806 .nr_pins = ARRAY_SIZE(pinmux_pins),
2807 .groups = pinmux_groups,
2808 .nr_groups = ARRAY_SIZE(pinmux_groups),
2809 .functions = pinmux_functions,
2810 .nr_functions = ARRAY_SIZE(pinmux_functions),
2811
2812 .cfg_regs = pinmux_config_regs,
2813
2814 .pinmux_data = pinmux_data,
2815 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
2816};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
index 3bda7bafd0ab..61b27ec48876 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7203.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -1587,6 +1587,6 @@ const struct sh_pfc_soc_info sh7203_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index e1cb6dc05028..8070765311db 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -2126,6 +2126,6 @@ const struct sh_pfc_soc_info sh7264_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index 7a11320ad96d..a50d22bef1f4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -2830,6 +2830,6 @@ const struct sh_pfc_soc_info sh7269_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 097526576f88..6a69c8c5d943 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3649,38 +3649,38 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
 };
 
 static const struct pinmux_irq pinmux_irqs[] = {
-	PINMUX_IRQ(irq_pin(0), 11),
-	PINMUX_IRQ(irq_pin(1), 10),
-	PINMUX_IRQ(irq_pin(2), 149),
-	PINMUX_IRQ(irq_pin(3), 224),
-	PINMUX_IRQ(irq_pin(4), 159),
-	PINMUX_IRQ(irq_pin(5), 227),
-	PINMUX_IRQ(irq_pin(6), 147),
-	PINMUX_IRQ(irq_pin(7), 150),
-	PINMUX_IRQ(irq_pin(8), 223),
-	PINMUX_IRQ(irq_pin(9), 56, 308),
-	PINMUX_IRQ(irq_pin(10), 54),
-	PINMUX_IRQ(irq_pin(11), 238),
-	PINMUX_IRQ(irq_pin(12), 156),
-	PINMUX_IRQ(irq_pin(13), 239),
-	PINMUX_IRQ(irq_pin(14), 251),
-	PINMUX_IRQ(irq_pin(15), 0),
-	PINMUX_IRQ(irq_pin(16), 249),
-	PINMUX_IRQ(irq_pin(17), 234),
-	PINMUX_IRQ(irq_pin(18), 13),
-	PINMUX_IRQ(irq_pin(19), 9),
-	PINMUX_IRQ(irq_pin(20), 14),
-	PINMUX_IRQ(irq_pin(21), 15),
-	PINMUX_IRQ(irq_pin(22), 40),
-	PINMUX_IRQ(irq_pin(23), 53),
-	PINMUX_IRQ(irq_pin(24), 118),
-	PINMUX_IRQ(irq_pin(25), 164),
-	PINMUX_IRQ(irq_pin(26), 115),
-	PINMUX_IRQ(irq_pin(27), 116),
-	PINMUX_IRQ(irq_pin(28), 117),
-	PINMUX_IRQ(irq_pin(29), 28),
-	PINMUX_IRQ(irq_pin(30), 27),
-	PINMUX_IRQ(irq_pin(31), 26),
+	PINMUX_IRQ(11), /* IRQ0 */
+	PINMUX_IRQ(10), /* IRQ1 */
+	PINMUX_IRQ(149), /* IRQ2 */
+	PINMUX_IRQ(224), /* IRQ3 */
+	PINMUX_IRQ(159), /* IRQ4 */
+	PINMUX_IRQ(227), /* IRQ5 */
+	PINMUX_IRQ(147), /* IRQ6 */
+	PINMUX_IRQ(150), /* IRQ7 */
+	PINMUX_IRQ(223), /* IRQ8 */
+	PINMUX_IRQ(56, 308), /* IRQ9 */
+	PINMUX_IRQ(54), /* IRQ10 */
+	PINMUX_IRQ(238), /* IRQ11 */
+	PINMUX_IRQ(156), /* IRQ12 */
+	PINMUX_IRQ(239), /* IRQ13 */
+	PINMUX_IRQ(251), /* IRQ14 */
+	PINMUX_IRQ(0), /* IRQ15 */
+	PINMUX_IRQ(249), /* IRQ16 */
+	PINMUX_IRQ(234), /* IRQ17 */
+	PINMUX_IRQ(13), /* IRQ18 */
+	PINMUX_IRQ(9), /* IRQ19 */
+	PINMUX_IRQ(14), /* IRQ20 */
+	PINMUX_IRQ(15), /* IRQ21 */
+	PINMUX_IRQ(40), /* IRQ22 */
+	PINMUX_IRQ(53), /* IRQ23 */
+	PINMUX_IRQ(118), /* IRQ24 */
+	PINMUX_IRQ(164), /* IRQ25 */
+	PINMUX_IRQ(115), /* IRQ26 */
+	PINMUX_IRQ(116), /* IRQ27 */
+	PINMUX_IRQ(117), /* IRQ28 */
+	PINMUX_IRQ(28), /* IRQ29 */
+	PINMUX_IRQ(27), /* IRQ30 */
+	PINMUX_IRQ(26), /* IRQ31 */
 };
 
 /* -----------------------------------------------------------------------------
@@ -3865,8 +3865,8 @@ const struct sh_pfc_soc_info sh73a0_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 
 	.gpio_irq = pinmux_irqs,
 	.gpio_irq_size = ARRAY_SIZE(pinmux_irqs),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
index 13d05f88bc01..e07a82df42c8 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7720.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -1201,6 +1201,6 @@ const struct sh_pfc_soc_info sh7720_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
index 914d872c37a4..29c69133b0ef 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7722.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -1741,6 +1741,6 @@ const struct sh_pfc_soc_info sh7722_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
index 4eb7eae2e6d0..8ea18df03492 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7723.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -1893,6 +1893,6 @@ const struct sh_pfc_soc_info sh7723_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
index 74a1a7f1317c..7f6c36c1a8fa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7724.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -2175,6 +2175,6 @@ const struct sh_pfc_soc_info sh7724_pinmux_info = {
 	.cfg_regs = pinmux_config_regs,
 	.data_regs = pinmux_data_regs,
 
-	.gpio_data = pinmux_data,
-	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+	.pinmux_data = pinmux_data,
+	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index e53dd1cb1625..e7deb51de7dc 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -598,502 +598,502 @@ static const u16 pinmux_data[] = {
598 /* IPSR0 */ 598 /* IPSR0 */
599 PINMUX_IPSR_DATA(IP0_1_0, A0), 599 PINMUX_IPSR_DATA(IP0_1_0, A0),
600 PINMUX_IPSR_DATA(IP0_1_0, ST0_CLKIN), 600 PINMUX_IPSR_DATA(IP0_1_0, ST0_CLKIN),
601 PINMUX_IPSR_MODSEL_DATA(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0), 601 PINMUX_IPSR_MSEL(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0),
602 PINMUX_IPSR_MODSEL_DATA(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1), 602 PINMUX_IPSR_MSEL(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1),
603 603
604 PINMUX_IPSR_DATA(IP0_3_2, A1), 604 PINMUX_IPSR_DATA(IP0_3_2, A1),
605 PINMUX_IPSR_DATA(IP0_3_2, ST0_REQ), 605 PINMUX_IPSR_DATA(IP0_3_2, ST0_REQ),
606 PINMUX_IPSR_MODSEL_DATA(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0), 606 PINMUX_IPSR_MSEL(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0),
607 PINMUX_IPSR_MODSEL_DATA(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1), 607 PINMUX_IPSR_MSEL(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1),
608 608
609 PINMUX_IPSR_DATA(IP0_5_4, A2), 609 PINMUX_IPSR_DATA(IP0_5_4, A2),
610 PINMUX_IPSR_DATA(IP0_5_4, ST0_SYC), 610 PINMUX_IPSR_DATA(IP0_5_4, ST0_SYC),
611 PINMUX_IPSR_MODSEL_DATA(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0), 611 PINMUX_IPSR_MSEL(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0),
612 PINMUX_IPSR_MODSEL_DATA(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1), 612 PINMUX_IPSR_MSEL(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1),
613 613
614 PINMUX_IPSR_DATA(IP0_7_6, A3), 614 PINMUX_IPSR_DATA(IP0_7_6, A3),
615 PINMUX_IPSR_DATA(IP0_7_6, ST0_VLD), 615 PINMUX_IPSR_DATA(IP0_7_6, ST0_VLD),
616 PINMUX_IPSR_MODSEL_DATA(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0), 616 PINMUX_IPSR_MSEL(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0),
617 PINMUX_IPSR_MODSEL_DATA(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1), 617 PINMUX_IPSR_MSEL(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1),
618 618
619 PINMUX_IPSR_DATA(IP0_9_8, A4), 619 PINMUX_IPSR_DATA(IP0_9_8, A4),
620 PINMUX_IPSR_DATA(IP0_9_8, ST0_D0), 620 PINMUX_IPSR_DATA(IP0_9_8, ST0_D0),
621 PINMUX_IPSR_MODSEL_DATA(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0), 621 PINMUX_IPSR_MSEL(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0),
622 PINMUX_IPSR_MODSEL_DATA(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1), 622 PINMUX_IPSR_MSEL(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1),
623 623
624 PINMUX_IPSR_DATA(IP0_11_10, A5), 624 PINMUX_IPSR_DATA(IP0_11_10, A5),
625 PINMUX_IPSR_DATA(IP0_11_10, ST0_D1), 625 PINMUX_IPSR_DATA(IP0_11_10, ST0_D1),
626 PINMUX_IPSR_MODSEL_DATA(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0), 626 PINMUX_IPSR_MSEL(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0),
627 PINMUX_IPSR_MODSEL_DATA(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1), 627 PINMUX_IPSR_MSEL(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1),
628 628
629 PINMUX_IPSR_DATA(IP0_13_12, A6), 629 PINMUX_IPSR_DATA(IP0_13_12, A6),
630 PINMUX_IPSR_DATA(IP0_13_12, ST0_D2), 630 PINMUX_IPSR_DATA(IP0_13_12, ST0_D2),
631 PINMUX_IPSR_MODSEL_DATA(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0), 631 PINMUX_IPSR_MSEL(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0),
632 PINMUX_IPSR_MODSEL_DATA(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1), 632 PINMUX_IPSR_MSEL(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1),
633 633
634 PINMUX_IPSR_DATA(IP0_15_14, A7), 634 PINMUX_IPSR_DATA(IP0_15_14, A7),
635 PINMUX_IPSR_DATA(IP0_15_14, ST0_D3), 635 PINMUX_IPSR_DATA(IP0_15_14, ST0_D3),
636 PINMUX_IPSR_MODSEL_DATA(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0), 636 PINMUX_IPSR_MSEL(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0),
637 PINMUX_IPSR_MODSEL_DATA(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1), 637 PINMUX_IPSR_MSEL(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1),
638 638
639 PINMUX_IPSR_DATA(IP0_17_16, A8), 639 PINMUX_IPSR_DATA(IP0_17_16, A8),
640 PINMUX_IPSR_DATA(IP0_17_16, ST0_D4), 640 PINMUX_IPSR_DATA(IP0_17_16, ST0_D4),
641 PINMUX_IPSR_MODSEL_DATA(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0), 641 PINMUX_IPSR_MSEL(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0),
642 PINMUX_IPSR_MODSEL_DATA(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2), 642 PINMUX_IPSR_MSEL(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2),
643 643
644 PINMUX_IPSR_DATA(IP0_19_18, A9), 644 PINMUX_IPSR_DATA(IP0_19_18, A9),
645 PINMUX_IPSR_DATA(IP0_19_18, ST0_D5), 645 PINMUX_IPSR_DATA(IP0_19_18, ST0_D5),
646 PINMUX_IPSR_MODSEL_DATA(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0), 646 PINMUX_IPSR_MSEL(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0),
647 PINMUX_IPSR_MODSEL_DATA(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2), 647 PINMUX_IPSR_MSEL(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2),
648 648
649 PINMUX_IPSR_DATA(IP0_21_20, A10), 649 PINMUX_IPSR_DATA(IP0_21_20, A10),
650 PINMUX_IPSR_DATA(IP0_21_20, ST0_D6), 650 PINMUX_IPSR_DATA(IP0_21_20, ST0_D6),
651 PINMUX_IPSR_MODSEL_DATA(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0), 651 PINMUX_IPSR_MSEL(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0),
652 PINMUX_IPSR_MODSEL_DATA(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2), 652 PINMUX_IPSR_MSEL(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2),
653 653
654 PINMUX_IPSR_DATA(IP0_23_22, A11), 654 PINMUX_IPSR_DATA(IP0_23_22, A11),
655 PINMUX_IPSR_DATA(IP0_23_22, ST0_D7), 655 PINMUX_IPSR_DATA(IP0_23_22, ST0_D7),
656 PINMUX_IPSR_MODSEL_DATA(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0), 656 PINMUX_IPSR_MSEL(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0),
657 PINMUX_IPSR_MODSEL_DATA(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2), 657 PINMUX_IPSR_MSEL(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2),
658 658
659 PINMUX_IPSR_DATA(IP0_25_24, A12), 659 PINMUX_IPSR_DATA(IP0_25_24, A12),
660 PINMUX_IPSR_MODSEL_DATA(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0), 660 PINMUX_IPSR_MSEL(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0),
661 PINMUX_IPSR_MODSEL_DATA(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1), 661 PINMUX_IPSR_MSEL(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1),
662 662
663 PINMUX_IPSR_DATA(IP0_27_26, A13), 663 PINMUX_IPSR_DATA(IP0_27_26, A13),
664 PINMUX_IPSR_MODSEL_DATA(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0), 664 PINMUX_IPSR_MSEL(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0),
665 PINMUX_IPSR_MODSEL_DATA(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1), 665 PINMUX_IPSR_MSEL(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1),
666 666
667 PINMUX_IPSR_DATA(IP0_29_28, A14), 667 PINMUX_IPSR_DATA(IP0_29_28, A14),
668 PINMUX_IPSR_MODSEL_DATA(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0), 668 PINMUX_IPSR_MSEL(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0),
669 PINMUX_IPSR_MODSEL_DATA(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1), 669 PINMUX_IPSR_MSEL(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1),
670 670
671 PINMUX_IPSR_DATA(IP0_31_30, A15), 671 PINMUX_IPSR_DATA(IP0_31_30, A15),
672 PINMUX_IPSR_DATA(IP0_31_30, ST0_VCO_CLKIN), 672 PINMUX_IPSR_DATA(IP0_31_30, ST0_VCO_CLKIN),
673 PINMUX_IPSR_MODSEL_DATA(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0), 673 PINMUX_IPSR_MSEL(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0),
674 PINMUX_IPSR_MODSEL_DATA(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1), 674 PINMUX_IPSR_MSEL(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1),
675 675
676 676
677 /* IPSR1 */ 677 /* IPSR1 */
678 PINMUX_IPSR_DATA(IP1_1_0, A16), 678 PINMUX_IPSR_DATA(IP1_1_0, A16),
679 PINMUX_IPSR_DATA(IP1_1_0, ST0_PWM), 679 PINMUX_IPSR_DATA(IP1_1_0, ST0_PWM),
680 PINMUX_IPSR_MODSEL_DATA(IP1_1_0, LCD_DON_A, SEL_LCDC_0), 680 PINMUX_IPSR_MSEL(IP1_1_0, LCD_DON_A, SEL_LCDC_0),
681 PINMUX_IPSR_MODSEL_DATA(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1), 681 PINMUX_IPSR_MSEL(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1),
682 682
683 PINMUX_IPSR_DATA(IP1_3_2, A17), 683 PINMUX_IPSR_DATA(IP1_3_2, A17),
684 PINMUX_IPSR_DATA(IP1_3_2, ST1_VCO_CLKIN), 684 PINMUX_IPSR_DATA(IP1_3_2, ST1_VCO_CLKIN),
685 PINMUX_IPSR_MODSEL_DATA(IP1_3_2, LCD_CL1_A, SEL_LCDC_0), 685 PINMUX_IPSR_MSEL(IP1_3_2, LCD_CL1_A, SEL_LCDC_0),
686 PINMUX_IPSR_MODSEL_DATA(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1), 686 PINMUX_IPSR_MSEL(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1),
687 687
688 PINMUX_IPSR_DATA(IP1_5_4, A18), 688 PINMUX_IPSR_DATA(IP1_5_4, A18),
689 PINMUX_IPSR_DATA(IP1_5_4, ST1_PWM), 689 PINMUX_IPSR_DATA(IP1_5_4, ST1_PWM),
690 PINMUX_IPSR_MODSEL_DATA(IP1_5_4, LCD_CL2_A, SEL_LCDC_0), 690 PINMUX_IPSR_MSEL(IP1_5_4, LCD_CL2_A, SEL_LCDC_0),
691 PINMUX_IPSR_MODSEL_DATA(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1), 691 PINMUX_IPSR_MSEL(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1),
692 692
693 PINMUX_IPSR_DATA(IP1_7_6, A19), 693 PINMUX_IPSR_DATA(IP1_7_6, A19),
694 PINMUX_IPSR_DATA(IP1_7_6, ST1_CLKIN), 694 PINMUX_IPSR_DATA(IP1_7_6, ST1_CLKIN),
695 PINMUX_IPSR_MODSEL_DATA(IP1_7_6, LCD_CLK_A, SEL_LCDC_0), 695 PINMUX_IPSR_MSEL(IP1_7_6, LCD_CLK_A, SEL_LCDC_0),
696 PINMUX_IPSR_MODSEL_DATA(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1), 696 PINMUX_IPSR_MSEL(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1),
697 697
698 PINMUX_IPSR_DATA(IP1_9_8, A20), 698 PINMUX_IPSR_DATA(IP1_9_8, A20),
699 PINMUX_IPSR_DATA(IP1_9_8, ST1_REQ), 699 PINMUX_IPSR_DATA(IP1_9_8, ST1_REQ),
700 PINMUX_IPSR_MODSEL_DATA(IP1_9_8, LCD_FLM_A, SEL_LCDC_0), 700 PINMUX_IPSR_MSEL(IP1_9_8, LCD_FLM_A, SEL_LCDC_0),
701 701
702 PINMUX_IPSR_DATA(IP1_11_10, A21), 702 PINMUX_IPSR_DATA(IP1_11_10, A21),
703 PINMUX_IPSR_DATA(IP1_11_10, ST1_SYC), 703 PINMUX_IPSR_DATA(IP1_11_10, ST1_SYC),
704 PINMUX_IPSR_MODSEL_DATA(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0), 704 PINMUX_IPSR_MSEL(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0),
705 705
706 PINMUX_IPSR_DATA(IP1_13_12, A22), 706 PINMUX_IPSR_DATA(IP1_13_12, A22),
707 PINMUX_IPSR_DATA(IP1_13_12, ST1_VLD), 707 PINMUX_IPSR_DATA(IP1_13_12, ST1_VLD),
708 PINMUX_IPSR_MODSEL_DATA(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0), 708 PINMUX_IPSR_MSEL(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0),
709 709
710 PINMUX_IPSR_DATA(IP1_15_14, A23), 710 PINMUX_IPSR_DATA(IP1_15_14, A23),
711 PINMUX_IPSR_DATA(IP1_15_14, ST1_D0), 711 PINMUX_IPSR_DATA(IP1_15_14, ST1_D0),
712 PINMUX_IPSR_MODSEL_DATA(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0), 712 PINMUX_IPSR_MSEL(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0),
713 713
714 PINMUX_IPSR_DATA(IP1_17_16, A24), 714 PINMUX_IPSR_DATA(IP1_17_16, A24),
715 PINMUX_IPSR_MODSEL_DATA(IP1_17_16, RX2_D, SEL_SCIF2_3), 715 PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
716 PINMUX_IPSR_DATA(IP1_17_16, ST1_D1), 716 PINMUX_IPSR_DATA(IP1_17_16, ST1_D1),
717 717
718 PINMUX_IPSR_DATA(IP1_19_18, A25), 718 PINMUX_IPSR_DATA(IP1_19_18, A25),
719 PINMUX_IPSR_MODSEL_DATA(IP1_17_16, RX2_D, SEL_SCIF2_3), 719 PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
720 PINMUX_IPSR_DATA(IP1_17_16, ST1_D2), 720 PINMUX_IPSR_DATA(IP1_17_16, ST1_D2),
721 721
722 PINMUX_IPSR_DATA(IP1_22_20, D0), 722 PINMUX_IPSR_DATA(IP1_22_20, D0),
723 PINMUX_IPSR_MODSEL_DATA(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0), 723 PINMUX_IPSR_MSEL(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0),
724 PINMUX_IPSR_MODSEL_DATA(IP1_22_20, MMC_D0_A, SEL_MMC_0), 724 PINMUX_IPSR_MSEL(IP1_22_20, MMC_D0_A, SEL_MMC_0),
725 PINMUX_IPSR_DATA(IP1_22_20, ST1_D3), 725 PINMUX_IPSR_DATA(IP1_22_20, ST1_D3),
726 PINMUX_IPSR_MODSEL_DATA(IP1_22_20, FD0_A, SEL_FLCTL_0), 726 PINMUX_IPSR_MSEL(IP1_22_20, FD0_A, SEL_FLCTL_0),
727 727
728 PINMUX_IPSR_DATA(IP1_25_23, D1), 728 PINMUX_IPSR_DATA(IP1_25_23, D1),
729 PINMUX_IPSR_MODSEL_DATA(IP1_25_23, SD0_DAT0_A, SEL_SDHI0_0), 729 PINMUX_IPSR_MSEL(IP1_25_23, SD0_DAT0_A, SEL_SDHI0_0),
730 PINMUX_IPSR_MODSEL_DATA(IP1_25_23, MMC_D1_A, SEL_MMC_0), 730 PINMUX_IPSR_MSEL(IP1_25_23, MMC_D1_A, SEL_MMC_0),
731 PINMUX_IPSR_DATA(IP1_25_23, ST1_D4), 731 PINMUX_IPSR_DATA(IP1_25_23, ST1_D4),
732 PINMUX_IPSR_MODSEL_DATA(IP1_25_23, FD1_A, SEL_FLCTL_0), 732 PINMUX_IPSR_MSEL(IP1_25_23, FD1_A, SEL_FLCTL_0),
733 733
734 PINMUX_IPSR_DATA(IP1_28_26, D2), 734 PINMUX_IPSR_DATA(IP1_28_26, D2),
735 PINMUX_IPSR_MODSEL_DATA(IP1_28_26, SD0_DAT0_A, SEL_SDHI0_0), 735 PINMUX_IPSR_MSEL(IP1_28_26, SD0_DAT0_A, SEL_SDHI0_0),
736 PINMUX_IPSR_MODSEL_DATA(IP1_28_26, MMC_D2_A, SEL_MMC_0), 736 PINMUX_IPSR_MSEL(IP1_28_26, MMC_D2_A, SEL_MMC_0),
737 PINMUX_IPSR_DATA(IP1_28_26, ST1_D5), 737 PINMUX_IPSR_DATA(IP1_28_26, ST1_D5),
738 PINMUX_IPSR_MODSEL_DATA(IP1_28_26, FD2_A, SEL_FLCTL_0), 738 PINMUX_IPSR_MSEL(IP1_28_26, FD2_A, SEL_FLCTL_0),
739 739
740 PINMUX_IPSR_DATA(IP1_31_29, D3), 740 PINMUX_IPSR_DATA(IP1_31_29, D3),
741 PINMUX_IPSR_MODSEL_DATA(IP1_31_29, SD0_DAT0_A, SEL_SDHI0_0), 741 PINMUX_IPSR_MSEL(IP1_31_29, SD0_DAT0_A, SEL_SDHI0_0),
742 PINMUX_IPSR_MODSEL_DATA(IP1_31_29, MMC_D3_A, SEL_MMC_0), 742 PINMUX_IPSR_MSEL(IP1_31_29, MMC_D3_A, SEL_MMC_0),
743 PINMUX_IPSR_DATA(IP1_31_29, ST1_D6), 743 PINMUX_IPSR_DATA(IP1_31_29, ST1_D6),
744 PINMUX_IPSR_MODSEL_DATA(IP1_31_29, FD3_A, SEL_FLCTL_0), 744 PINMUX_IPSR_MSEL(IP1_31_29, FD3_A, SEL_FLCTL_0),
745 745
746 /* IPSR2 */ 746 /* IPSR2 */
747 PINMUX_IPSR_DATA(IP2_2_0, D4), 747 PINMUX_IPSR_DATA(IP2_2_0, D4),
748 PINMUX_IPSR_MODSEL_DATA(IP2_2_0, SD0_CD_A, SEL_SDHI0_0), 748 PINMUX_IPSR_MSEL(IP2_2_0, SD0_CD_A, SEL_SDHI0_0),
749 PINMUX_IPSR_MODSEL_DATA(IP2_2_0, MMC_D4_A, SEL_MMC_0), 749 PINMUX_IPSR_MSEL(IP2_2_0, MMC_D4_A, SEL_MMC_0),
750 PINMUX_IPSR_DATA(IP2_2_0, ST1_D7), 750 PINMUX_IPSR_DATA(IP2_2_0, ST1_D7),
751 PINMUX_IPSR_MODSEL_DATA(IP2_2_0, FD4_A, SEL_FLCTL_0), 751 PINMUX_IPSR_MSEL(IP2_2_0, FD4_A, SEL_FLCTL_0),
752 752
753 PINMUX_IPSR_DATA(IP2_4_3, D5), 753 PINMUX_IPSR_DATA(IP2_4_3, D5),
754 PINMUX_IPSR_MODSEL_DATA(IP2_4_3, SD0_WP_A, SEL_SDHI0_0), 754 PINMUX_IPSR_MSEL(IP2_4_3, SD0_WP_A, SEL_SDHI0_0),
755 PINMUX_IPSR_MODSEL_DATA(IP2_4_3, MMC_D5_A, SEL_MMC_0), 755 PINMUX_IPSR_MSEL(IP2_4_3, MMC_D5_A, SEL_MMC_0),
756 PINMUX_IPSR_MODSEL_DATA(IP2_4_3, FD5_A, SEL_FLCTL_0), 756 PINMUX_IPSR_MSEL(IP2_4_3, FD5_A, SEL_FLCTL_0),
757 757
758 PINMUX_IPSR_DATA(IP2_7_5, D6), 758 PINMUX_IPSR_DATA(IP2_7_5, D6),
759 PINMUX_IPSR_MODSEL_DATA(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0), 759 PINMUX_IPSR_MSEL(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0),
760 PINMUX_IPSR_MODSEL_DATA(IP2_7_5, MMC_D6_A, SEL_MMC_0), 760 PINMUX_IPSR_MSEL(IP2_7_5, MMC_D6_A, SEL_MMC_0),
761 PINMUX_IPSR_MODSEL_DATA(IP2_7_5, QSPCLK_A, SEL_RQSPI_0), 761 PINMUX_IPSR_MSEL(IP2_7_5, QSPCLK_A, SEL_RQSPI_0),
762 PINMUX_IPSR_MODSEL_DATA(IP2_7_5, FD6_A, SEL_FLCTL_0), 762 PINMUX_IPSR_MSEL(IP2_7_5, FD6_A, SEL_FLCTL_0),
763 763
764 PINMUX_IPSR_DATA(IP2_10_8, D7), 764 PINMUX_IPSR_DATA(IP2_10_8, D7),
765 PINMUX_IPSR_MODSEL_DATA(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0), 765 PINMUX_IPSR_MSEL(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0),
766 PINMUX_IPSR_MODSEL_DATA(IP2_10_8, MMC_D7_A, SEL_MMC_0), 766 PINMUX_IPSR_MSEL(IP2_10_8, MMC_D7_A, SEL_MMC_0),
767 PINMUX_IPSR_MODSEL_DATA(IP2_10_8, QSSL_A, SEL_RQSPI_0), 767 PINMUX_IPSR_MSEL(IP2_10_8, QSSL_A, SEL_RQSPI_0),
768 PINMUX_IPSR_MODSEL_DATA(IP2_10_8, FD7_A, SEL_FLCTL_0), 768 PINMUX_IPSR_MSEL(IP2_10_8, FD7_A, SEL_FLCTL_0),
769 769
770 PINMUX_IPSR_DATA(IP2_13_11, D8), 770 PINMUX_IPSR_DATA(IP2_13_11, D8),
771 PINMUX_IPSR_MODSEL_DATA(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0), 771 PINMUX_IPSR_MSEL(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),
772 PINMUX_IPSR_MODSEL_DATA(IP2_13_11, MMC_CLK_A, SEL_MMC_0), 772 PINMUX_IPSR_MSEL(IP2_13_11, MMC_CLK_A, SEL_MMC_0),
773 PINMUX_IPSR_MODSEL_DATA(IP2_13_11, QIO2_A, SEL_RQSPI_0), 773 PINMUX_IPSR_MSEL(IP2_13_11, QIO2_A, SEL_RQSPI_0),
774 PINMUX_IPSR_MODSEL_DATA(IP2_13_11, FCE_A, SEL_FLCTL_0), 774 PINMUX_IPSR_MSEL(IP2_13_11, FCE_A, SEL_FLCTL_0),
775 PINMUX_IPSR_MODSEL_DATA(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1), 775 PINMUX_IPSR_MSEL(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1),
776 776
777 PINMUX_IPSR_DATA(IP2_16_14, D9), 777 PINMUX_IPSR_DATA(IP2_16_14, D9),
778 PINMUX_IPSR_MODSEL_DATA(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0), 778 PINMUX_IPSR_MSEL(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0),
779 PINMUX_IPSR_MODSEL_DATA(IP2_16_14, MMC_CMD_A, SEL_MMC_0), 779 PINMUX_IPSR_MSEL(IP2_16_14, MMC_CMD_A, SEL_MMC_0),
780 PINMUX_IPSR_MODSEL_DATA(IP2_16_14, QIO3_A, SEL_RQSPI_0), 780 PINMUX_IPSR_MSEL(IP2_16_14, QIO3_A, SEL_RQSPI_0),
781 PINMUX_IPSR_MODSEL_DATA(IP2_16_14, FCLE_A, SEL_FLCTL_0), 781 PINMUX_IPSR_MSEL(IP2_16_14, FCLE_A, SEL_FLCTL_0),
782 PINMUX_IPSR_MODSEL_DATA(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1), 782 PINMUX_IPSR_MSEL(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1),
783 783
784 PINMUX_IPSR_DATA(IP2_19_17, D10), 784 PINMUX_IPSR_DATA(IP2_19_17, D10),
785 PINMUX_IPSR_MODSEL_DATA(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0), 785 PINMUX_IPSR_MSEL(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0),
786 PINMUX_IPSR_MODSEL_DATA(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0), 786 PINMUX_IPSR_MSEL(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0),
787 PINMUX_IPSR_MODSEL_DATA(IP2_19_17, FALE_A, SEL_FLCTL_0), 787 PINMUX_IPSR_MSEL(IP2_19_17, FALE_A, SEL_FLCTL_0),
788 PINMUX_IPSR_MODSEL_DATA(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1), 788 PINMUX_IPSR_MSEL(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1),
789 789
790 PINMUX_IPSR_DATA(IP2_22_20, D11), 790 PINMUX_IPSR_DATA(IP2_22_20, D11),
791 PINMUX_IPSR_MODSEL_DATA(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0), 791 PINMUX_IPSR_MSEL(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0),
792 PINMUX_IPSR_MODSEL_DATA(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0), 792 PINMUX_IPSR_MSEL(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0),
793 PINMUX_IPSR_MODSEL_DATA(IP2_22_20, FRE_A, SEL_FLCTL_0), 793 PINMUX_IPSR_MSEL(IP2_22_20, FRE_A, SEL_FLCTL_0),
794 794
795 PINMUX_IPSR_DATA(IP2_24_23, D12), 795 PINMUX_IPSR_DATA(IP2_24_23, D12),
796 PINMUX_IPSR_MODSEL_DATA(IP2_24_23, FWE_A, SEL_FLCTL_0), 796 PINMUX_IPSR_MSEL(IP2_24_23, FWE_A, SEL_FLCTL_0),
797 PINMUX_IPSR_MODSEL_DATA(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1), 797 PINMUX_IPSR_MSEL(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1),
798 798
799 PINMUX_IPSR_DATA(IP2_27_25, D13), 799 PINMUX_IPSR_DATA(IP2_27_25, D13),
800 PINMUX_IPSR_MODSEL_DATA(IP2_27_25, RX2_B, SEL_SCIF2_1), 800 PINMUX_IPSR_MSEL(IP2_27_25, RX2_B, SEL_SCIF2_1),
801 PINMUX_IPSR_MODSEL_DATA(IP2_27_25, FRB_A, SEL_FLCTL_0), 801 PINMUX_IPSR_MSEL(IP2_27_25, FRB_A, SEL_FLCTL_0),
802 PINMUX_IPSR_MODSEL_DATA(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1), 802 PINMUX_IPSR_MSEL(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1),
803 803
804 PINMUX_IPSR_DATA(IP2_30_28, D14), 804 PINMUX_IPSR_DATA(IP2_30_28, D14),
805 PINMUX_IPSR_MODSEL_DATA(IP2_30_28, TX2_B, SEL_SCIF2_1), 805 PINMUX_IPSR_MSEL(IP2_30_28, TX2_B, SEL_SCIF2_1),
806 PINMUX_IPSR_MODSEL_DATA(IP2_30_28, FSE_A, SEL_FLCTL_0), 806 PINMUX_IPSR_MSEL(IP2_30_28, FSE_A, SEL_FLCTL_0),
807 PINMUX_IPSR_MODSEL_DATA(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1), 807 PINMUX_IPSR_MSEL(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1),
808 808
809 /* IPSR3 */ 809 /* IPSR3 */
810 PINMUX_IPSR_DATA(IP3_1_0, D15), 810 PINMUX_IPSR_DATA(IP3_1_0, D15),
811 PINMUX_IPSR_MODSEL_DATA(IP3_1_0, SCK2_B, SEL_SCIF2_1), 811 PINMUX_IPSR_MSEL(IP3_1_0, SCK2_B, SEL_SCIF2_1),
812 812
813 PINMUX_IPSR_DATA(IP3_2, CS1_A26), 813 PINMUX_IPSR_DATA(IP3_2, CS1_A26),
814 PINMUX_IPSR_MODSEL_DATA(IP3_2, QIO3_B, SEL_RQSPI_1), 814 PINMUX_IPSR_MSEL(IP3_2, QIO3_B, SEL_RQSPI_1),
815 815
816 PINMUX_IPSR_DATA(IP3_5_3, EX_CS1), 816 PINMUX_IPSR_DATA(IP3_5_3, EX_CS1),
817 PINMUX_IPSR_MODSEL_DATA(IP3_5_3, RX3_B, SEL_SCIF2_1), 817 PINMUX_IPSR_MSEL(IP3_5_3, RX3_B, SEL_SCIF2_1),
818 PINMUX_IPSR_DATA(IP3_5_3, ATACS0), 818 PINMUX_IPSR_DATA(IP3_5_3, ATACS0),
819 PINMUX_IPSR_MODSEL_DATA(IP3_5_3, QIO2_B, SEL_RQSPI_1), 819 PINMUX_IPSR_MSEL(IP3_5_3, QIO2_B, SEL_RQSPI_1),
820 PINMUX_IPSR_DATA(IP3_5_3, ET0_ETXD0), 820 PINMUX_IPSR_DATA(IP3_5_3, ET0_ETXD0),
821 821
822 PINMUX_IPSR_DATA(IP3_8_6, EX_CS2), 822 PINMUX_IPSR_DATA(IP3_8_6, EX_CS2),
823 PINMUX_IPSR_MODSEL_DATA(IP3_8_6, TX3_B, SEL_SCIF3_1), 823 PINMUX_IPSR_MSEL(IP3_8_6, TX3_B, SEL_SCIF3_1),
824 PINMUX_IPSR_DATA(IP3_8_6, ATACS1), 824 PINMUX_IPSR_DATA(IP3_8_6, ATACS1),
825 PINMUX_IPSR_MODSEL_DATA(IP3_8_6, QSPCLK_B, SEL_RQSPI_1), 825 PINMUX_IPSR_MSEL(IP3_8_6, QSPCLK_B, SEL_RQSPI_1),
826 PINMUX_IPSR_MODSEL_DATA(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0), 826 PINMUX_IPSR_MSEL(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0),
827 827
828 PINMUX_IPSR_DATA(IP3_11_9, EX_CS3), 828 PINMUX_IPSR_DATA(IP3_11_9, EX_CS3),
829 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, SD1_CD_A, SEL_SDHI1_0), 829 PINMUX_IPSR_MSEL(IP3_11_9, SD1_CD_A, SEL_SDHI1_0),
830 PINMUX_IPSR_DATA(IP3_11_9, ATARD), 830 PINMUX_IPSR_DATA(IP3_11_9, ATARD),
831 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1), 831 PINMUX_IPSR_MSEL(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1),
832 PINMUX_IPSR_MODSEL_DATA(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0), 832 PINMUX_IPSR_MSEL(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0),
833 833
834 PINMUX_IPSR_DATA(IP3_14_12, EX_CS4), 834 PINMUX_IPSR_DATA(IP3_14_12, EX_CS4),
835 PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SD1_WP_A, SEL_SDHI1_0), 835 PINMUX_IPSR_MSEL(IP3_14_12, SD1_WP_A, SEL_SDHI1_0),
836 PINMUX_IPSR_DATA(IP3_14_12, ATAWR), 836 PINMUX_IPSR_DATA(IP3_14_12, ATAWR),
837 PINMUX_IPSR_MODSEL_DATA(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1), 837 PINMUX_IPSR_MSEL(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1),
838 PINMUX_IPSR_MODSEL_DATA(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0), 838 PINMUX_IPSR_MSEL(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0),
839 839
840 PINMUX_IPSR_DATA(IP3_17_15, EX_CS5), 840 PINMUX_IPSR_DATA(IP3_17_15, EX_CS5),
841 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0), 841 PINMUX_IPSR_MSEL(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0),
842 PINMUX_IPSR_DATA(IP3_17_15, ATADIR), 842 PINMUX_IPSR_DATA(IP3_17_15, ATADIR),
843 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, QSSL_B, SEL_RQSPI_1), 843 PINMUX_IPSR_MSEL(IP3_17_15, QSSL_B, SEL_RQSPI_1),
844 PINMUX_IPSR_MODSEL_DATA(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0), 844 PINMUX_IPSR_MSEL(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0),
845 845
846 PINMUX_IPSR_DATA(IP3_19_18, RD_WR), 846 PINMUX_IPSR_DATA(IP3_19_18, RD_WR),
847 PINMUX_IPSR_DATA(IP3_19_18, TCLK0), 847 PINMUX_IPSR_DATA(IP3_19_18, TCLK0),
848 PINMUX_IPSR_MODSEL_DATA(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1), 848 PINMUX_IPSR_MSEL(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1),
849 PINMUX_IPSR_DATA(IP3_19_18, ET0_ETXD4), 849 PINMUX_IPSR_DATA(IP3_19_18, ET0_ETXD4),
850 850
851 PINMUX_IPSR_DATA(IP3_20, EX_WAIT0), 851 PINMUX_IPSR_DATA(IP3_20, EX_WAIT0),
852 PINMUX_IPSR_MODSEL_DATA(IP3_20, TCLK1_B, SEL_TMU_1), 852 PINMUX_IPSR_MSEL(IP3_20, TCLK1_B, SEL_TMU_1),
853 853
854 PINMUX_IPSR_DATA(IP3_23_21, EX_WAIT1), 854 PINMUX_IPSR_DATA(IP3_23_21, EX_WAIT1),
855 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0), 855 PINMUX_IPSR_MSEL(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0),
856 PINMUX_IPSR_DATA(IP3_23_21, DREQ2), 856 PINMUX_IPSR_DATA(IP3_23_21, DREQ2),
857 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2), 857 PINMUX_IPSR_MSEL(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2),
858 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2), 858 PINMUX_IPSR_MSEL(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2),
859 PINMUX_IPSR_MODSEL_DATA(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0), 859 PINMUX_IPSR_MSEL(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0),
860 860
861 PINMUX_IPSR_DATA(IP3_26_24, EX_WAIT2), 861 PINMUX_IPSR_DATA(IP3_26_24, EX_WAIT2),
862 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0), 862 PINMUX_IPSR_MSEL(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0),
863 PINMUX_IPSR_DATA(IP3_26_24, DACK2), 863 PINMUX_IPSR_DATA(IP3_26_24, DACK2),
864 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2), 864 PINMUX_IPSR_MSEL(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2),
865 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2), 865 PINMUX_IPSR_MSEL(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2),
866 PINMUX_IPSR_MODSEL_DATA(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0), 866 PINMUX_IPSR_MSEL(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0),
867 867
868 PINMUX_IPSR_DATA(IP3_29_27, DRACK0), 868 PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
869 PINMUX_IPSR_MODSEL_DATA(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0), 869 PINMUX_IPSR_MSEL(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0),
870 PINMUX_IPSR_DATA(IP3_29_27, ATAG), 870 PINMUX_IPSR_DATA(IP3_29_27, ATAG),
871 PINMUX_IPSR_MODSEL_DATA(IP3_29_27, TCLK1_A, SEL_TMU_0), 871 PINMUX_IPSR_MSEL(IP3_29_27, TCLK1_A, SEL_TMU_0),
872 PINMUX_IPSR_DATA(IP3_29_27, ET0_ETXD7), 872 PINMUX_IPSR_DATA(IP3_29_27, ET0_ETXD7),
873 873
874 /* IPSR4 */ 874 /* IPSR4 */
875 PINMUX_IPSR_MODSEL_DATA(IP4_2_0, HCTS0_A, SEL_HSCIF_0), 875 PINMUX_IPSR_MSEL(IP4_2_0, HCTS0_A, SEL_HSCIF_0),
876 PINMUX_IPSR_MODSEL_DATA(IP4_2_0, CTS1_A, SEL_SCIF1_0), 876 PINMUX_IPSR_MSEL(IP4_2_0, CTS1_A, SEL_SCIF1_0),
877 PINMUX_IPSR_DATA(IP4_2_0, VI0_FIELD), 877 PINMUX_IPSR_DATA(IP4_2_0, VI0_FIELD),
878 PINMUX_IPSR_MODSEL_DATA(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0), 878 PINMUX_IPSR_MSEL(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0),
879 PINMUX_IPSR_DATA(IP4_2_0, ET0_ERXD7), 879 PINMUX_IPSR_DATA(IP4_2_0, ET0_ERXD7),
880 880
881 PINMUX_IPSR_MODSEL_DATA(IP4_5_3, HRTS0_A, SEL_HSCIF_0), 881 PINMUX_IPSR_MSEL(IP4_5_3, HRTS0_A, SEL_HSCIF_0),
882 PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RTS1_A, SEL_SCIF1_0), 882 PINMUX_IPSR_MSEL(IP4_5_3, RTS1_A, SEL_SCIF1_0),
883 PINMUX_IPSR_DATA(IP4_5_3, VI0_HSYNC), 883 PINMUX_IPSR_DATA(IP4_5_3, VI0_HSYNC),
884 PINMUX_IPSR_MODSEL_DATA(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0), 884 PINMUX_IPSR_MSEL(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0),
885 PINMUX_IPSR_DATA(IP4_5_3, ET0_RX_DV), 885 PINMUX_IPSR_DATA(IP4_5_3, ET0_RX_DV),
886 886
887 PINMUX_IPSR_MODSEL_DATA(IP4_8_6, HSCK0_A, SEL_HSCIF_0), 887 PINMUX_IPSR_MSEL(IP4_8_6, HSCK0_A, SEL_HSCIF_0),
888 PINMUX_IPSR_MODSEL_DATA(IP4_8_6, SCK1_A, SEL_SCIF1_0), 888 PINMUX_IPSR_MSEL(IP4_8_6, SCK1_A, SEL_SCIF1_0),
889 PINMUX_IPSR_DATA(IP4_8_6, VI0_VSYNC), 889 PINMUX_IPSR_DATA(IP4_8_6, VI0_VSYNC),
890 PINMUX_IPSR_MODSEL_DATA(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0), 890 PINMUX_IPSR_MSEL(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0),
891 PINMUX_IPSR_DATA(IP4_8_6, ET0_RX_ER), 891 PINMUX_IPSR_DATA(IP4_8_6, ET0_RX_ER),
892 892
893 PINMUX_IPSR_MODSEL_DATA(IP4_11_9, HRX0_A, SEL_HSCIF_0), 893 PINMUX_IPSR_MSEL(IP4_11_9, HRX0_A, SEL_HSCIF_0),
894 PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RX1_A, SEL_SCIF1_0), 894 PINMUX_IPSR_MSEL(IP4_11_9, RX1_A, SEL_SCIF1_0),
895 PINMUX_IPSR_DATA(IP4_11_9, VI0_DATA0_VI0_B0), 895 PINMUX_IPSR_DATA(IP4_11_9, VI0_DATA0_VI0_B0),
896 PINMUX_IPSR_MODSEL_DATA(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0), 896 PINMUX_IPSR_MSEL(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0),
897 PINMUX_IPSR_DATA(IP4_11_9, ET0_CRS), 897 PINMUX_IPSR_DATA(IP4_11_9, ET0_CRS),
898 898
899 PINMUX_IPSR_MODSEL_DATA(IP4_14_12, HTX0_A, SEL_HSCIF_0), 899 PINMUX_IPSR_MSEL(IP4_14_12, HTX0_A, SEL_HSCIF_0),
900 PINMUX_IPSR_MODSEL_DATA(IP4_14_12, TX1_A, SEL_SCIF1_0), 900 PINMUX_IPSR_MSEL(IP4_14_12, TX1_A, SEL_SCIF1_0),
901 PINMUX_IPSR_DATA(IP4_14_12, VI0_DATA1_VI0_B1), 901 PINMUX_IPSR_DATA(IP4_14_12, VI0_DATA1_VI0_B1),
902 PINMUX_IPSR_MODSEL_DATA(IP4_14_12, RMII0_MDC_A, SEL_RMII_0), 902 PINMUX_IPSR_MSEL(IP4_14_12, RMII0_MDC_A, SEL_RMII_0),
903 PINMUX_IPSR_DATA(IP4_14_12, ET0_COL), 903 PINMUX_IPSR_DATA(IP4_14_12, ET0_COL),
904 904
905 PINMUX_IPSR_MODSEL_DATA(IP4_17_15, CTS0_B, SEL_SCIF0_1), 905 PINMUX_IPSR_MSEL(IP4_17_15, CTS0_B, SEL_SCIF0_1),
906 PINMUX_IPSR_DATA(IP4_17_15, VI0_DATA2_VI0_B2), 906 PINMUX_IPSR_DATA(IP4_17_15, VI0_DATA2_VI0_B2),
907 PINMUX_IPSR_MODSEL_DATA(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0), 907 PINMUX_IPSR_MSEL(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0),
908 PINMUX_IPSR_DATA(IP4_17_15, ET0_MDC), 908 PINMUX_IPSR_DATA(IP4_17_15, ET0_MDC),
909 909
910 PINMUX_IPSR_MODSEL_DATA(IP4_19_18, RTS0_B, SEL_SCIF0_1), 910 PINMUX_IPSR_MSEL(IP4_19_18, RTS0_B, SEL_SCIF0_1),
911 PINMUX_IPSR_DATA(IP4_19_18, VI0_DATA3_VI0_B3), 911 PINMUX_IPSR_DATA(IP4_19_18, VI0_DATA3_VI0_B3),
912 PINMUX_IPSR_MODSEL_DATA(IP4_19_18, ET0_MDIO_A, SEL_ET0_0), 912 PINMUX_IPSR_MSEL(IP4_19_18, ET0_MDIO_A, SEL_ET0_0),
913 913
914 PINMUX_IPSR_MODSEL_DATA(IP4_21_20, SCK1_B, SEL_SCIF1_1), 914 PINMUX_IPSR_MSEL(IP4_21_20, SCK1_B, SEL_SCIF1_1),
915 PINMUX_IPSR_DATA(IP4_21_20, VI0_DATA4_VI0_B4), 915 PINMUX_IPSR_DATA(IP4_21_20, VI0_DATA4_VI0_B4),
916 PINMUX_IPSR_MODSEL_DATA(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0), 916 PINMUX_IPSR_MSEL(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0),
917 917
918 PINMUX_IPSR_MODSEL_DATA(IP4_23_22, RX1_B, SEL_SCIF1_1), 918 PINMUX_IPSR_MSEL(IP4_23_22, RX1_B, SEL_SCIF1_1),
919 PINMUX_IPSR_DATA(IP4_23_22, VI0_DATA5_VI0_B5), 919 PINMUX_IPSR_DATA(IP4_23_22, VI0_DATA5_VI0_B5),
920 PINMUX_IPSR_MODSEL_DATA(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0), 920 PINMUX_IPSR_MSEL(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0),
921 921
922 PINMUX_IPSR_MODSEL_DATA(IP4_25_24, TX1_B, SEL_SCIF1_1), 922 PINMUX_IPSR_MSEL(IP4_25_24, TX1_B, SEL_SCIF1_1),
923 PINMUX_IPSR_DATA(IP4_25_24, VI0_DATA6_VI0_G0), 923 PINMUX_IPSR_DATA(IP4_25_24, VI0_DATA6_VI0_G0),
924 PINMUX_IPSR_MODSEL_DATA(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0), 924 PINMUX_IPSR_MSEL(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0),
925 925
926 PINMUX_IPSR_MODSEL_DATA(IP4_27_26, CTS1_B, SEL_SCIF1_1), 926 PINMUX_IPSR_MSEL(IP4_27_26, CTS1_B, SEL_SCIF1_1),
927 PINMUX_IPSR_DATA(IP4_27_26, VI0_DATA7_VI0_G1), 927 PINMUX_IPSR_DATA(IP4_27_26, VI0_DATA7_VI0_G1),
928 928
929 PINMUX_IPSR_MODSEL_DATA(IP4_29_28, RTS1_B, SEL_SCIF1_1), 929 PINMUX_IPSR_MSEL(IP4_29_28, RTS1_B, SEL_SCIF1_1),
930 PINMUX_IPSR_DATA(IP4_29_28, VI0_G2), 930 PINMUX_IPSR_DATA(IP4_29_28, VI0_G2),
931 931
932 PINMUX_IPSR_MODSEL_DATA(IP4_31_30, SCK2_A, SEL_SCIF2_0), 932 PINMUX_IPSR_MSEL(IP4_31_30, SCK2_A, SEL_SCIF2_0),
933 PINMUX_IPSR_DATA(IP4_31_30, VI0_G3), 933 PINMUX_IPSR_DATA(IP4_31_30, VI0_G3),
934 934
935 /* IPSR5 */ 935 /* IPSR5 */
936 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0), 936 PINMUX_IPSR_MSEL(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0),
937 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, RX2_A, SEL_SCIF2_0), 937 PINMUX_IPSR_MSEL(IP5_2_0, RX2_A, SEL_SCIF2_0),
938 PINMUX_IPSR_DATA(IP5_2_0, VI0_G4), 938 PINMUX_IPSR_DATA(IP5_2_0, VI0_G4),
939 PINMUX_IPSR_MODSEL_DATA(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1), 939 PINMUX_IPSR_MSEL(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1),
940 940
941 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0), 941 PINMUX_IPSR_MSEL(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0),
942 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, TX2_A, SEL_SCIF2_0), 942 PINMUX_IPSR_MSEL(IP5_5_3, TX2_A, SEL_SCIF2_0),
943 PINMUX_IPSR_DATA(IP5_5_3, VI0_G5), 943 PINMUX_IPSR_DATA(IP5_5_3, VI0_G5),
944 PINMUX_IPSR_MODSEL_DATA(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1), 944 PINMUX_IPSR_MSEL(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1),
945 945
946 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0), 946 PINMUX_IPSR_MSEL(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0),
947 PINMUX_IPSR_MODSEL_DATA(IP5_8_6, RX3_A, SEL_SCIF3_0), 947 PINMUX_IPSR_MSEL(IP5_8_6, RX3_A, SEL_SCIF3_0),
948 PINMUX_IPSR_DATA(IP4_8_6, VI0_R0), 948 PINMUX_IPSR_DATA(IP4_8_6, VI0_R0),
949 PINMUX_IPSR_MODSEL_DATA(IP4_8_6, ET0_ERXD2_B, SEL_ET0_1), 949 PINMUX_IPSR_MSEL(IP4_8_6, ET0_ERXD2_B, SEL_ET0_1),
950 950
951 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0), 951 PINMUX_IPSR_MSEL(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0),
952 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, TX3_A, SEL_SCIF3_0), 952 PINMUX_IPSR_MSEL(IP5_11_9, TX3_A, SEL_SCIF3_0),
953 PINMUX_IPSR_DATA(IP5_11_9, VI0_R1), 953 PINMUX_IPSR_DATA(IP5_11_9, VI0_R1),
954 PINMUX_IPSR_MODSEL_DATA(IP5_11_9, ET0_MDIO_B, SEL_ET0_1), 954 PINMUX_IPSR_MSEL(IP5_11_9, ET0_MDIO_B, SEL_ET0_1),
955 955
956 PINMUX_IPSR_MODSEL_DATA(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0), 956 PINMUX_IPSR_MSEL(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0),
957 PINMUX_IPSR_MODSEL_DATA(IP5_14_12, RX4_A, SEL_SCIF4_0), 957 PINMUX_IPSR_MSEL(IP5_14_12, RX4_A, SEL_SCIF4_0),
958 PINMUX_IPSR_DATA(IP5_14_12, VI0_R2), 958 PINMUX_IPSR_DATA(IP5_14_12, VI0_R2),
959 PINMUX_IPSR_MODSEL_DATA(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1), 959 PINMUX_IPSR_MSEL(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1),
960 960
961 PINMUX_IPSR_MODSEL_DATA(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0), 961 PINMUX_IPSR_MSEL(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0),
962 PINMUX_IPSR_MODSEL_DATA(IP5_17_15, TX4_A, SEL_SCIF4_0), 962 PINMUX_IPSR_MSEL(IP5_17_15, TX4_A, SEL_SCIF4_0),
963 PINMUX_IPSR_DATA(IP5_17_15, VI0_R3), 963 PINMUX_IPSR_DATA(IP5_17_15, VI0_R3),
964 PINMUX_IPSR_MODSEL_DATA(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1), 964 PINMUX_IPSR_MSEL(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1),
965 965
966 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, SD2_CD_A, SEL_SDHI2_0), 966 PINMUX_IPSR_MSEL(IP5_20_18, SD2_CD_A, SEL_SDHI2_0),
967 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, RX5_A, SEL_SCIF5_0), 967 PINMUX_IPSR_MSEL(IP5_20_18, RX5_A, SEL_SCIF5_0),
968 PINMUX_IPSR_DATA(IP5_20_18, VI0_R4), 968 PINMUX_IPSR_DATA(IP5_20_18, VI0_R4),
969 PINMUX_IPSR_MODSEL_DATA(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1), 969 PINMUX_IPSR_MSEL(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1),
970 970
971 PINMUX_IPSR_MODSEL_DATA(IP5_22_21, SD2_WP_A, SEL_SDHI2_0), 971 PINMUX_IPSR_MSEL(IP5_22_21, SD2_WP_A, SEL_SDHI2_0),
972 PINMUX_IPSR_MODSEL_DATA(IP5_22_21, TX5_A, SEL_SCIF5_0), 972 PINMUX_IPSR_MSEL(IP5_22_21, TX5_A, SEL_SCIF5_0),
973 PINMUX_IPSR_DATA(IP5_22_21, VI0_R5), 973 PINMUX_IPSR_DATA(IP5_22_21, VI0_R5),
974 974
975 PINMUX_IPSR_DATA(IP5_24_23, REF125CK), 975 PINMUX_IPSR_DATA(IP5_24_23, REF125CK),
976 PINMUX_IPSR_DATA(IP5_24_23, ADTRG), 976 PINMUX_IPSR_DATA(IP5_24_23, ADTRG),
977 PINMUX_IPSR_MODSEL_DATA(IP5_24_23, RX5_C, SEL_SCIF5_2), 977 PINMUX_IPSR_MSEL(IP5_24_23, RX5_C, SEL_SCIF5_2),
978 PINMUX_IPSR_DATA(IP5_26_25, REF50CK), 978 PINMUX_IPSR_DATA(IP5_26_25, REF50CK),
979 PINMUX_IPSR_MODSEL_DATA(IP5_26_25, CTS1_E, SEL_SCIF1_3), 979 PINMUX_IPSR_MSEL(IP5_26_25, CTS1_E, SEL_SCIF1_3),
980 PINMUX_IPSR_MODSEL_DATA(IP5_26_25, HCTS0_D, SEL_HSCIF_3), 980 PINMUX_IPSR_MSEL(IP5_26_25, HCTS0_D, SEL_HSCIF_3),
981 981
982 /* IPSR6 */ 982 /* IPSR6 */
983 PINMUX_IPSR_DATA(IP6_2_0, DU0_DR0), 983 PINMUX_IPSR_DATA(IP6_2_0, DU0_DR0),
984 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1), 984 PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1),
985 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, HRX0_D, SEL_HSCIF_3), 985 PINMUX_IPSR_MSEL(IP6_2_0, HRX0_D, SEL_HSCIF_3),
986 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, IETX_A, SEL_IEBUS_0), 986 PINMUX_IPSR_MSEL(IP6_2_0, IETX_A, SEL_IEBUS_0),
987 PINMUX_IPSR_MODSEL_DATA(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0), 987 PINMUX_IPSR_MSEL(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0),
988 PINMUX_IPSR_DATA(IP6_2_0, HIFD00), 988 PINMUX_IPSR_DATA(IP6_2_0, HIFD00),
989 989
990 PINMUX_IPSR_DATA(IP6_5_3, DU0_DR1), 990 PINMUX_IPSR_DATA(IP6_5_3, DU0_DR1),
991 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, SCK0_B, SEL_SCIF0_1), 991 PINMUX_IPSR_MSEL(IP6_5_3, SCK0_B, SEL_SCIF0_1),
992 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, HTX0_D, SEL_HSCIF_3), 992 PINMUX_IPSR_MSEL(IP6_5_3, HTX0_D, SEL_HSCIF_3),
993 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, IERX_A, SEL_IEBUS_0), 993 PINMUX_IPSR_MSEL(IP6_5_3, IERX_A, SEL_IEBUS_0),
994 PINMUX_IPSR_MODSEL_DATA(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0), 994 PINMUX_IPSR_MSEL(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0),
995 PINMUX_IPSR_DATA(IP6_5_3, HIFD01), 995 PINMUX_IPSR_DATA(IP6_5_3, HIFD01),
996 996
997 PINMUX_IPSR_DATA(IP6_7_6, DU0_DR2), 997 PINMUX_IPSR_DATA(IP6_7_6, DU0_DR2),
998 PINMUX_IPSR_MODSEL_DATA(IP6_7_6, RX0_B, SEL_SCIF0_1), 998 PINMUX_IPSR_MSEL(IP6_7_6, RX0_B, SEL_SCIF0_1),
999 PINMUX_IPSR_MODSEL_DATA(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0), 999 PINMUX_IPSR_MSEL(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0),
1000 PINMUX_IPSR_DATA(IP6_7_6, HIFD02), 1000 PINMUX_IPSR_DATA(IP6_7_6, HIFD02),
1001 1001
1002 PINMUX_IPSR_DATA(IP6_9_8, DU0_DR3), 1002 PINMUX_IPSR_DATA(IP6_9_8, DU0_DR3),
1003 PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TX0_B, SEL_SCIF0_1), 1003 PINMUX_IPSR_MSEL(IP6_9_8, TX0_B, SEL_SCIF0_1),
1004 PINMUX_IPSR_MODSEL_DATA(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0), 1004 PINMUX_IPSR_MSEL(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0),
1005 PINMUX_IPSR_DATA(IP6_9_8, HIFD03), 1005 PINMUX_IPSR_DATA(IP6_9_8, HIFD03),
1006 1006
1007 PINMUX_IPSR_DATA(IP6_11_10, DU0_DR4), 1007 PINMUX_IPSR_DATA(IP6_11_10, DU0_DR4),
1008 PINMUX_IPSR_MODSEL_DATA(IP6_11_10, CTS0_C, SEL_SCIF0_2), 1008 PINMUX_IPSR_MSEL(IP6_11_10, CTS0_C, SEL_SCIF0_2),
1009 PINMUX_IPSR_MODSEL_DATA(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0), 1009 PINMUX_IPSR_MSEL(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0),
1010 PINMUX_IPSR_DATA(IP6_11_10, HIFD04), 1010 PINMUX_IPSR_DATA(IP6_11_10, HIFD04),
1011 1011
1012 PINMUX_IPSR_DATA(IP6_13_12, DU0_DR5), 1012 PINMUX_IPSR_DATA(IP6_13_12, DU0_DR5),
1013 PINMUX_IPSR_MODSEL_DATA(IP6_13_12, RTS0_C, SEL_SCIF0_1), 1013 PINMUX_IPSR_MSEL(IP6_13_12, RTS0_C, SEL_SCIF0_1),
1014 PINMUX_IPSR_MODSEL_DATA(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0), 1014 PINMUX_IPSR_MSEL(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0),
1015 PINMUX_IPSR_DATA(IP6_13_12, HIFD05), 1015 PINMUX_IPSR_DATA(IP6_13_12, HIFD05),
1016 1016
1017 PINMUX_IPSR_DATA(IP6_15_14, DU0_DR6), 1017 PINMUX_IPSR_DATA(IP6_15_14, DU0_DR6),
1018 PINMUX_IPSR_MODSEL_DATA(IP6_15_14, SCK1_C, SEL_SCIF1_2), 1018 PINMUX_IPSR_MSEL(IP6_15_14, SCK1_C, SEL_SCIF1_2),
1019 PINMUX_IPSR_MODSEL_DATA(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0), 1019 PINMUX_IPSR_MSEL(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0),
1020 PINMUX_IPSR_DATA(IP6_15_14, HIFD06), 1020 PINMUX_IPSR_DATA(IP6_15_14, HIFD06),
1021 1021
1022 PINMUX_IPSR_DATA(IP6_17_16, DU0_DR7), 1022 PINMUX_IPSR_DATA(IP6_17_16, DU0_DR7),
1023 PINMUX_IPSR_MODSEL_DATA(IP6_17_16, RX1_C, SEL_SCIF1_2), 1023 PINMUX_IPSR_MSEL(IP6_17_16, RX1_C, SEL_SCIF1_2),
1024 PINMUX_IPSR_MODSEL_DATA(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0), 1024 PINMUX_IPSR_MSEL(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0),
1025 PINMUX_IPSR_DATA(IP6_17_16, HIFD07), 1025 PINMUX_IPSR_DATA(IP6_17_16, HIFD07),
1026 1026
1027 PINMUX_IPSR_DATA(IP6_20_18, DU0_DG0), 1027 PINMUX_IPSR_DATA(IP6_20_18, DU0_DG0),
1028 PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TX1_C, SEL_SCIF1_2), 1028 PINMUX_IPSR_MSEL(IP6_20_18, TX1_C, SEL_SCIF1_2),
1029 PINMUX_IPSR_MODSEL_DATA(IP6_20_18, HSCK0_D, SEL_HSCIF_3), 1029 PINMUX_IPSR_MSEL(IP6_20_18, HSCK0_D, SEL_HSCIF_3),
1030 PINMUX_IPSR_MODSEL_DATA(IP6_20_18, IECLK_A, SEL_IEBUS_0), 1030 PINMUX_IPSR_MSEL(IP6_20_18, IECLK_A, SEL_IEBUS_0),
1031 PINMUX_IPSR_MODSEL_DATA(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0), 1031 PINMUX_IPSR_MSEL(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0),
1032 PINMUX_IPSR_DATA(IP6_20_18, HIFD08), 1032 PINMUX_IPSR_DATA(IP6_20_18, HIFD08),
1033 1033
1034 PINMUX_IPSR_DATA(IP6_23_21, DU0_DG1), 1034 PINMUX_IPSR_DATA(IP6_23_21, DU0_DG1),
1035 PINMUX_IPSR_MODSEL_DATA(IP6_23_21, CTS1_C, SEL_SCIF1_2), 1035 PINMUX_IPSR_MSEL(IP6_23_21, CTS1_C, SEL_SCIF1_2),
1036 PINMUX_IPSR_MODSEL_DATA(IP6_23_21, HRTS0_D, SEL_HSCIF_3), 1036 PINMUX_IPSR_MSEL(IP6_23_21, HRTS0_D, SEL_HSCIF_3),
1037 PINMUX_IPSR_MODSEL_DATA(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0), 1037 PINMUX_IPSR_MSEL(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0),
1038 PINMUX_IPSR_DATA(IP6_23_21, HIFD09), 1038 PINMUX_IPSR_DATA(IP6_23_21, HIFD09),
1039 1039
1040 /* IPSR7 */ 1040 /* IPSR7 */
1041 PINMUX_IPSR_DATA(IP7_2_0, DU0_DG2), 1041 PINMUX_IPSR_DATA(IP7_2_0, DU0_DG2),
1042 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RTS1_C, SEL_SCIF1_2), 1042 PINMUX_IPSR_MSEL(IP7_2_0, RTS1_C, SEL_SCIF1_2),
1043 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, RMII0_MDC_B, SEL_RMII_1), 1043 PINMUX_IPSR_MSEL(IP7_2_0, RMII0_MDC_B, SEL_RMII_1),
1044 PINMUX_IPSR_MODSEL_DATA(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0), 1044 PINMUX_IPSR_MSEL(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0),
1045 PINMUX_IPSR_DATA(IP7_2_0, HIFD10), 1045 PINMUX_IPSR_DATA(IP7_2_0, HIFD10),
1046 1046
1047 PINMUX_IPSR_DATA(IP7_5_3, DU0_DG3), 1047 PINMUX_IPSR_DATA(IP7_5_3, DU0_DG3),
1048 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, SCK2_C, SEL_SCIF2_2), 1048 PINMUX_IPSR_MSEL(IP7_5_3, SCK2_C, SEL_SCIF2_2),
1049 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1), 1049 PINMUX_IPSR_MSEL(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1),
1050 PINMUX_IPSR_MODSEL_DATA(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0), 1050 PINMUX_IPSR_MSEL(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0),
1051 PINMUX_IPSR_DATA(IP7_5_3, HIFD11), 1051 PINMUX_IPSR_DATA(IP7_5_3, HIFD11),
1052 1052
1053 PINMUX_IPSR_DATA(IP7_8_6, DU0_DG4), 1053 PINMUX_IPSR_DATA(IP7_8_6, DU0_DG4),
1054 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RX2_C, SEL_SCIF2_2), 1054 PINMUX_IPSR_MSEL(IP7_8_6, RX2_C, SEL_SCIF2_2),
1055 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1), 1055 PINMUX_IPSR_MSEL(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1),
1056 PINMUX_IPSR_MODSEL_DATA(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0), 1056 PINMUX_IPSR_MSEL(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0),
1057 PINMUX_IPSR_DATA(IP7_8_6, HIFD12), 1057 PINMUX_IPSR_DATA(IP7_8_6, HIFD12),
1058 1058
1059 PINMUX_IPSR_DATA(IP7_11_9, DU0_DG5), 1059 PINMUX_IPSR_DATA(IP7_11_9, DU0_DG5),
1060 PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TX2_C, SEL_SCIF2_2), 1060 PINMUX_IPSR_MSEL(IP7_11_9, TX2_C, SEL_SCIF2_2),
1061 PINMUX_IPSR_MODSEL_DATA(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1), 1061 PINMUX_IPSR_MSEL(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1),
1062 PINMUX_IPSR_MODSEL_DATA(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0), 1062 PINMUX_IPSR_MSEL(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0),
1063 PINMUX_IPSR_DATA(IP7_11_9, HIFD13), 1063 PINMUX_IPSR_DATA(IP7_11_9, HIFD13),
1064 1064
1065 PINMUX_IPSR_DATA(IP7_14_12, DU0_DG6), 1065 PINMUX_IPSR_DATA(IP7_14_12, DU0_DG6),
1066 PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RX3_C, SEL_SCIF3_2), 1066 PINMUX_IPSR_MSEL(IP7_14_12, RX3_C, SEL_SCIF3_2),
1067 PINMUX_IPSR_MODSEL_DATA(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1), 1067 PINMUX_IPSR_MSEL(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1),
1068 PINMUX_IPSR_MODSEL_DATA(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0), 1068 PINMUX_IPSR_MSEL(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0),
1069 PINMUX_IPSR_DATA(IP7_14_12, HIFD14), 1069 PINMUX_IPSR_DATA(IP7_14_12, HIFD14),
1070 1070
1071 PINMUX_IPSR_DATA(IP7_17_15, DU0_DG7), 1071 PINMUX_IPSR_DATA(IP7_17_15, DU0_DG7),
1072 PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TX3_C, SEL_SCIF3_2), 1072 PINMUX_IPSR_MSEL(IP7_17_15, TX3_C, SEL_SCIF3_2),
1073 PINMUX_IPSR_MODSEL_DATA(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1), 1073 PINMUX_IPSR_MSEL(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1),
1074 PINMUX_IPSR_MODSEL_DATA(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0), 1074 PINMUX_IPSR_MSEL(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0),
1075 PINMUX_IPSR_DATA(IP7_17_15, HIFD15), 1075 PINMUX_IPSR_DATA(IP7_17_15, HIFD15),
1076 1076
1077 PINMUX_IPSR_DATA(IP7_20_18, DU0_DB0), 1077 PINMUX_IPSR_DATA(IP7_20_18, DU0_DB0),
1078 PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RX4_C, SEL_SCIF4_2), 1078 PINMUX_IPSR_MSEL(IP7_20_18, RX4_C, SEL_SCIF4_2),
1079 PINMUX_IPSR_MODSEL_DATA(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1), 1079 PINMUX_IPSR_MSEL(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1),
1080 PINMUX_IPSR_MODSEL_DATA(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0), 1080 PINMUX_IPSR_MSEL(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0),
1081 PINMUX_IPSR_DATA(IP7_20_18, HIFCS), 1081 PINMUX_IPSR_DATA(IP7_20_18, HIFCS),
1082 1082
1083 PINMUX_IPSR_DATA(IP7_23_21, DU0_DB1), 1083 PINMUX_IPSR_DATA(IP7_23_21, DU0_DB1),
1084 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TX4_C, SEL_SCIF4_2), 1084 PINMUX_IPSR_MSEL(IP7_23_21, TX4_C, SEL_SCIF4_2),
1085 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1), 1085 PINMUX_IPSR_MSEL(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1),
1086 PINMUX_IPSR_MODSEL_DATA(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0), 1086 PINMUX_IPSR_MSEL(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0),
1087 PINMUX_IPSR_DATA(IP7_23_21, HIFWR), 1087 PINMUX_IPSR_DATA(IP7_23_21, HIFWR),
1088 1088
1089 PINMUX_IPSR_DATA(IP7_26_24, DU0_DB2), 1089 PINMUX_IPSR_DATA(IP7_26_24, DU0_DB2),
1090 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RX5_B, SEL_SCIF5_1), 1090 PINMUX_IPSR_MSEL(IP7_26_24, RX5_B, SEL_SCIF5_1),
1091 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1), 1091 PINMUX_IPSR_MSEL(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1),
1092 PINMUX_IPSR_MODSEL_DATA(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0), 1092 PINMUX_IPSR_MSEL(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0),
1093 1093
1094 PINMUX_IPSR_DATA(IP7_28_27, DU0_DB3), 1094 PINMUX_IPSR_DATA(IP7_28_27, DU0_DB3),
1095 PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TX5_B, SEL_SCIF5_1), 1095 PINMUX_IPSR_MSEL(IP7_28_27, TX5_B, SEL_SCIF5_1),
1096 PINMUX_IPSR_MODSEL_DATA(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0), 1096 PINMUX_IPSR_MSEL(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0),
1097 PINMUX_IPSR_DATA(IP7_28_27, HIFRD), 1097 PINMUX_IPSR_DATA(IP7_28_27, HIFRD),
1098 1098
1099 PINMUX_IPSR_DATA(IP7_30_29, DU0_DB4), 1099 PINMUX_IPSR_DATA(IP7_30_29, DU0_DB4),
@@ -1107,251 +1107,251 @@ static const u16 pinmux_data[] = {
1107 PINMUX_IPSR_DATA(IP8_3_2, HIFRDY), 1107 PINMUX_IPSR_DATA(IP8_3_2, HIFRDY),
1108 1108
1109 PINMUX_IPSR_DATA(IP8_5_4, DU0_DB7), 1109 PINMUX_IPSR_DATA(IP8_5_4, DU0_DB7),
1110 PINMUX_IPSR_MODSEL_DATA(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1), 1110 PINMUX_IPSR_MSEL(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1),
1111 PINMUX_IPSR_MODSEL_DATA(IP8_5_4, HIFEBL_B, SEL_HIF_1), 1111 PINMUX_IPSR_MSEL(IP8_5_4, HIFEBL_B, SEL_HIF_1),
1112 1112
1113 PINMUX_IPSR_DATA(IP8_7_6, DU0_DOTCLKIN), 1113 PINMUX_IPSR_DATA(IP8_7_6, DU0_DOTCLKIN),
1114 PINMUX_IPSR_MODSEL_DATA(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2), 1114 PINMUX_IPSR_MSEL(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2),
1115 PINMUX_IPSR_MODSEL_DATA(IP8_7_6, SSI_WS0_B, SEL_SSI0_1), 1115 PINMUX_IPSR_MSEL(IP8_7_6, SSI_WS0_B, SEL_SSI0_1),
1116 1116
1117 PINMUX_IPSR_DATA(IP8_9_8, DU0_DOTCLKOUT), 1117 PINMUX_IPSR_DATA(IP8_9_8, DU0_DOTCLKOUT),
1118 PINMUX_IPSR_MODSEL_DATA(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2), 1118 PINMUX_IPSR_MSEL(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2),
1119 PINMUX_IPSR_MODSEL_DATA(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1), 1119 PINMUX_IPSR_MSEL(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1),
1120 1120
1121 PINMUX_IPSR_DATA(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC), 1121 PINMUX_IPSR_DATA(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
1122 PINMUX_IPSR_MODSEL_DATA(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2), 1122 PINMUX_IPSR_MSEL(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2),
1123 PINMUX_IPSR_MODSEL_DATA(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1), 1123 PINMUX_IPSR_MSEL(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1),
1124 1124
1125 PINMUX_IPSR_DATA(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC), 1125 PINMUX_IPSR_DATA(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
1126 PINMUX_IPSR_MODSEL_DATA(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2), 1126 PINMUX_IPSR_MSEL(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2),
1127 PINMUX_IPSR_MODSEL_DATA(IP8_13_12, SSI_WS1_B, SEL_SSI1_1), 1127 PINMUX_IPSR_MSEL(IP8_13_12, SSI_WS1_B, SEL_SSI1_1),
1128 1128
1129 PINMUX_IPSR_DATA(IP8_15_14, DU0_EXODDF_DU0_ODDF), 1129 PINMUX_IPSR_DATA(IP8_15_14, DU0_EXODDF_DU0_ODDF),
1130 PINMUX_IPSR_MODSEL_DATA(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1), 1130 PINMUX_IPSR_MSEL(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1),
1131 PINMUX_IPSR_MODSEL_DATA(IP8_15_14, HSCK0_B, SEL_HSCIF_1), 1131 PINMUX_IPSR_MSEL(IP8_15_14, HSCK0_B, SEL_HSCIF_1),
1132 PINMUX_IPSR_MODSEL_DATA(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1), 1132 PINMUX_IPSR_MSEL(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1),
1133 1133
1134 PINMUX_IPSR_DATA(IP8_17_16, DU0_DISP), 1134 PINMUX_IPSR_DATA(IP8_17_16, DU0_DISP),
1135 PINMUX_IPSR_MODSEL_DATA(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1), 1135 PINMUX_IPSR_MSEL(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1),
1136 PINMUX_IPSR_MODSEL_DATA(IP8_17_16, HRX0_B, SEL_HSCIF_1), 1136 PINMUX_IPSR_MSEL(IP8_17_16, HRX0_B, SEL_HSCIF_1),
1137 PINMUX_IPSR_MODSEL_DATA(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1), 1137 PINMUX_IPSR_MSEL(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1),
1138 1138
1139 PINMUX_IPSR_DATA(IP8_19_18, DU0_CDE), 1139 PINMUX_IPSR_DATA(IP8_19_18, DU0_CDE),
1140 PINMUX_IPSR_MODSEL_DATA(IP8_19_18, HTX0_B, SEL_HSCIF_1), 1140 PINMUX_IPSR_MSEL(IP8_19_18, HTX0_B, SEL_HSCIF_1),
1141 PINMUX_IPSR_MODSEL_DATA(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1), 1141 PINMUX_IPSR_MSEL(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1),
1142 PINMUX_IPSR_MODSEL_DATA(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1), 1142 PINMUX_IPSR_MSEL(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1),
1143 1143
1144 PINMUX_IPSR_MODSEL_DATA(IP8_22_20, IRQ0_A, SEL_INTC_0), 1144 PINMUX_IPSR_MSEL(IP8_22_20, IRQ0_A, SEL_INTC_0),
1145 PINMUX_IPSR_MODSEL_DATA(IP8_22_20, HSPI_TX_B, SEL_HSPI_1), 1145 PINMUX_IPSR_MSEL(IP8_22_20, HSPI_TX_B, SEL_HSPI_1),
1146 PINMUX_IPSR_MODSEL_DATA(IP8_22_20, RX3_E, SEL_SCIF3_4), 1146 PINMUX_IPSR_MSEL(IP8_22_20, RX3_E, SEL_SCIF3_4),
1147 PINMUX_IPSR_DATA(IP8_22_20, ET0_ERXD0), 1147 PINMUX_IPSR_DATA(IP8_22_20, ET0_ERXD0),
1148 1148
1149 PINMUX_IPSR_MODSEL_DATA(IP8_25_23, IRQ1_A, SEL_INTC_0), 1149 PINMUX_IPSR_MSEL(IP8_25_23, IRQ1_A, SEL_INTC_0),
1150 PINMUX_IPSR_MODSEL_DATA(IP8_25_23, HSPI_RX_B, SEL_HSPI_1), 1150 PINMUX_IPSR_MSEL(IP8_25_23, HSPI_RX_B, SEL_HSPI_1),
1151 PINMUX_IPSR_MODSEL_DATA(IP8_25_23, TX3_E, SEL_SCIF3_4), 1151 PINMUX_IPSR_MSEL(IP8_25_23, TX3_E, SEL_SCIF3_4),
1152 PINMUX_IPSR_DATA(IP8_25_23, ET0_ERXD1), 1152 PINMUX_IPSR_DATA(IP8_25_23, ET0_ERXD1),
1153 1153
1154 PINMUX_IPSR_MODSEL_DATA(IP8_27_26, IRQ2_A, SEL_INTC_0), 1154 PINMUX_IPSR_MSEL(IP8_27_26, IRQ2_A, SEL_INTC_0),
1155 PINMUX_IPSR_MODSEL_DATA(IP8_27_26, CTS0_A, SEL_SCIF0_0), 1155 PINMUX_IPSR_MSEL(IP8_27_26, CTS0_A, SEL_SCIF0_0),
1156 PINMUX_IPSR_MODSEL_DATA(IP8_27_26, HCTS0_B, SEL_HSCIF_1), 1156 PINMUX_IPSR_MSEL(IP8_27_26, HCTS0_B, SEL_HSCIF_1),
1157 PINMUX_IPSR_MODSEL_DATA(IP8_27_26, ET0_ERXD2_A, SEL_ET0_0), 1157 PINMUX_IPSR_MSEL(IP8_27_26, ET0_ERXD2_A, SEL_ET0_0),
1158 1158
1159 PINMUX_IPSR_MODSEL_DATA(IP8_29_28, IRQ3_A, SEL_INTC_0), 1159 PINMUX_IPSR_MSEL(IP8_29_28, IRQ3_A, SEL_INTC_0),
1160 PINMUX_IPSR_MODSEL_DATA(IP8_29_28, RTS0_A, SEL_SCIF0_0), 1160 PINMUX_IPSR_MSEL(IP8_29_28, RTS0_A, SEL_SCIF0_0),
1161 PINMUX_IPSR_MODSEL_DATA(IP8_29_28, HRTS0_B, SEL_HSCIF_1), 1161 PINMUX_IPSR_MSEL(IP8_29_28, HRTS0_B, SEL_HSCIF_1),
1162 PINMUX_IPSR_MODSEL_DATA(IP8_29_28, ET0_ERXD3_A, SEL_ET0_0), 1162 PINMUX_IPSR_MSEL(IP8_29_28, ET0_ERXD3_A, SEL_ET0_0),
1163 1163
1164 /* IPSR9 */ 1164 /* IPSR9 */
1165 PINMUX_IPSR_MODSEL_DATA(IP9_1_0, VI1_CLK_A, SEL_VIN1_0), 1165 PINMUX_IPSR_MSEL(IP9_1_0, VI1_CLK_A, SEL_VIN1_0),
1166 PINMUX_IPSR_MODSEL_DATA(IP9_1_0, FD0_B, SEL_FLCTL_1), 1166 PINMUX_IPSR_MSEL(IP9_1_0, FD0_B, SEL_FLCTL_1),
1167 PINMUX_IPSR_MODSEL_DATA(IP9_1_0, LCD_DATA0_B, SEL_LCDC_1), 1167 PINMUX_IPSR_MSEL(IP9_1_0, LCD_DATA0_B, SEL_LCDC_1),
1168 1168
1169 PINMUX_IPSR_MODSEL_DATA(IP9_3_2, VI1_0_A, SEL_VIN1_0), 1169 PINMUX_IPSR_MSEL(IP9_3_2, VI1_0_A, SEL_VIN1_0),
1170 PINMUX_IPSR_MODSEL_DATA(IP9_3_2, FD1_B, SEL_FLCTL_1), 1170 PINMUX_IPSR_MSEL(IP9_3_2, FD1_B, SEL_FLCTL_1),
1171 PINMUX_IPSR_MODSEL_DATA(IP9_3_2, LCD_DATA1_B, SEL_LCDC_1), 1171 PINMUX_IPSR_MSEL(IP9_3_2, LCD_DATA1_B, SEL_LCDC_1),
1172 1172
1173 PINMUX_IPSR_MODSEL_DATA(IP9_5_4, VI1_1_A, SEL_VIN1_0), 1173 PINMUX_IPSR_MSEL(IP9_5_4, VI1_1_A, SEL_VIN1_0),
1174 PINMUX_IPSR_MODSEL_DATA(IP9_5_4, FD2_B, SEL_FLCTL_1), 1174 PINMUX_IPSR_MSEL(IP9_5_4, FD2_B, SEL_FLCTL_1),
1175 PINMUX_IPSR_MODSEL_DATA(IP9_5_4, LCD_DATA2_B, SEL_LCDC_1), 1175 PINMUX_IPSR_MSEL(IP9_5_4, LCD_DATA2_B, SEL_LCDC_1),
1176 1176
1177 PINMUX_IPSR_MODSEL_DATA(IP9_7_6, VI1_2_A, SEL_VIN1_0), 1177 PINMUX_IPSR_MSEL(IP9_7_6, VI1_2_A, SEL_VIN1_0),
1178 PINMUX_IPSR_MODSEL_DATA(IP9_7_6, FD3_B, SEL_FLCTL_1), 1178 PINMUX_IPSR_MSEL(IP9_7_6, FD3_B, SEL_FLCTL_1),
1179 PINMUX_IPSR_MODSEL_DATA(IP9_7_6, LCD_DATA3_B, SEL_LCDC_1), 1179 PINMUX_IPSR_MSEL(IP9_7_6, LCD_DATA3_B, SEL_LCDC_1),
1180 1180
1181 PINMUX_IPSR_MODSEL_DATA(IP9_9_8, VI1_3_A, SEL_VIN1_0), 1181 PINMUX_IPSR_MSEL(IP9_9_8, VI1_3_A, SEL_VIN1_0),
1182 PINMUX_IPSR_MODSEL_DATA(IP9_9_8, FD4_B, SEL_FLCTL_1), 1182 PINMUX_IPSR_MSEL(IP9_9_8, FD4_B, SEL_FLCTL_1),
1183 PINMUX_IPSR_MODSEL_DATA(IP9_9_8, LCD_DATA4_B, SEL_LCDC_1), 1183 PINMUX_IPSR_MSEL(IP9_9_8, LCD_DATA4_B, SEL_LCDC_1),
1184 1184
1185 PINMUX_IPSR_MODSEL_DATA(IP9_11_10, VI1_4_A, SEL_VIN1_0), 1185 PINMUX_IPSR_MSEL(IP9_11_10, VI1_4_A, SEL_VIN1_0),
1186 PINMUX_IPSR_MODSEL_DATA(IP9_11_10, FD5_B, SEL_FLCTL_1), 1186 PINMUX_IPSR_MSEL(IP9_11_10, FD5_B, SEL_FLCTL_1),
1187 PINMUX_IPSR_MODSEL_DATA(IP9_11_10, LCD_DATA5_B, SEL_LCDC_1), 1187 PINMUX_IPSR_MSEL(IP9_11_10, LCD_DATA5_B, SEL_LCDC_1),
1188 1188
1189 PINMUX_IPSR_MODSEL_DATA(IP9_13_12, VI1_5_A, SEL_VIN1_0), 1189 PINMUX_IPSR_MSEL(IP9_13_12, VI1_5_A, SEL_VIN1_0),
1190 PINMUX_IPSR_MODSEL_DATA(IP9_13_12, FD6_B, SEL_FLCTL_1), 1190 PINMUX_IPSR_MSEL(IP9_13_12, FD6_B, SEL_FLCTL_1),
1191 PINMUX_IPSR_MODSEL_DATA(IP9_13_12, LCD_DATA6_B, SEL_LCDC_1), 1191 PINMUX_IPSR_MSEL(IP9_13_12, LCD_DATA6_B, SEL_LCDC_1),
1192 1192
1193 PINMUX_IPSR_MODSEL_DATA(IP9_15_14, VI1_6_A, SEL_VIN1_0), 1193 PINMUX_IPSR_MSEL(IP9_15_14, VI1_6_A, SEL_VIN1_0),
1194 PINMUX_IPSR_MODSEL_DATA(IP9_15_14, FD7_B, SEL_FLCTL_1), 1194 PINMUX_IPSR_MSEL(IP9_15_14, FD7_B, SEL_FLCTL_1),
1195 PINMUX_IPSR_MODSEL_DATA(IP9_15_14, LCD_DATA7_B, SEL_LCDC_1), 1195 PINMUX_IPSR_MSEL(IP9_15_14, LCD_DATA7_B, SEL_LCDC_1),
1196 1196
1197 PINMUX_IPSR_MODSEL_DATA(IP9_17_16, VI1_7_A, SEL_VIN1_0), 1197 PINMUX_IPSR_MSEL(IP9_17_16, VI1_7_A, SEL_VIN1_0),
1198 PINMUX_IPSR_MODSEL_DATA(IP9_17_16, FCE_B, SEL_FLCTL_1), 1198 PINMUX_IPSR_MSEL(IP9_17_16, FCE_B, SEL_FLCTL_1),
1199 PINMUX_IPSR_MODSEL_DATA(IP9_17_16, LCD_DATA8_B, SEL_LCDC_1), 1199 PINMUX_IPSR_MSEL(IP9_17_16, LCD_DATA8_B, SEL_LCDC_1),
1200 1200
1201 PINMUX_IPSR_MODSEL_DATA(IP9_19_18, SSI_SCK0_A, SEL_SSI0_0), 1201 PINMUX_IPSR_MSEL(IP9_19_18, SSI_SCK0_A, SEL_SSI0_0),
1202 PINMUX_IPSR_MODSEL_DATA(IP9_19_18, TIOC1A_B, SEL_MTU2_CH1_1), 1202 PINMUX_IPSR_MSEL(IP9_19_18, TIOC1A_B, SEL_MTU2_CH1_1),
1203 PINMUX_IPSR_MODSEL_DATA(IP9_19_18, LCD_DATA9_B, SEL_LCDC_1), 1203 PINMUX_IPSR_MSEL(IP9_19_18, LCD_DATA9_B, SEL_LCDC_1),
1204 1204
1205 PINMUX_IPSR_MODSEL_DATA(IP9_21_20, SSI_WS0_A, SEL_SSI0_0), 1205 PINMUX_IPSR_MSEL(IP9_21_20, SSI_WS0_A, SEL_SSI0_0),
1206 PINMUX_IPSR_MODSEL_DATA(IP9_21_20, TIOC1B_B, SEL_MTU2_CH1_1), 1206 PINMUX_IPSR_MSEL(IP9_21_20, TIOC1B_B, SEL_MTU2_CH1_1),
1207 PINMUX_IPSR_MODSEL_DATA(IP9_21_20, LCD_DATA10_B, SEL_LCDC_1), 1207 PINMUX_IPSR_MSEL(IP9_21_20, LCD_DATA10_B, SEL_LCDC_1),
1208 1208
1209 PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SSI_SDATA0_A, SEL_SSI0_0), 1209 PINMUX_IPSR_MSEL(IP9_23_22, SSI_SDATA0_A, SEL_SSI0_0),
1210 PINMUX_IPSR_MODSEL_DATA(IP9_23_22, VI1_0_B, SEL_VIN1_1), 1210 PINMUX_IPSR_MSEL(IP9_23_22, VI1_0_B, SEL_VIN1_1),
1211 PINMUX_IPSR_MODSEL_DATA(IP9_23_22, TIOC2A_B, SEL_MTU2_CH2_1), 1211 PINMUX_IPSR_MSEL(IP9_23_22, TIOC2A_B, SEL_MTU2_CH2_1),
1212 PINMUX_IPSR_MODSEL_DATA(IP9_23_22, LCD_DATA11_B, SEL_LCDC_1), 1212 PINMUX_IPSR_MSEL(IP9_23_22, LCD_DATA11_B, SEL_LCDC_1),
1213 1213
1214 PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SSI_SCK1_A, SEL_SSI1_0), 1214 PINMUX_IPSR_MSEL(IP9_25_24, SSI_SCK1_A, SEL_SSI1_0),
1215 PINMUX_IPSR_MODSEL_DATA(IP9_25_24, VI1_1_B, SEL_VIN1_1), 1215 PINMUX_IPSR_MSEL(IP9_25_24, VI1_1_B, SEL_VIN1_1),
1216 PINMUX_IPSR_MODSEL_DATA(IP9_25_24, TIOC2B_B, SEL_MTU2_CH2_1), 1216 PINMUX_IPSR_MSEL(IP9_25_24, TIOC2B_B, SEL_MTU2_CH2_1),
1217 PINMUX_IPSR_MODSEL_DATA(IP9_25_24, LCD_DATA12_B, SEL_LCDC_1), 1217 PINMUX_IPSR_MSEL(IP9_25_24, LCD_DATA12_B, SEL_LCDC_1),
1218 1218
1219 PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SSI_WS1_A, SEL_SSI1_0), 1219 PINMUX_IPSR_MSEL(IP9_27_26, SSI_WS1_A, SEL_SSI1_0),
1220 PINMUX_IPSR_MODSEL_DATA(IP9_27_26, VI1_2_B, SEL_VIN1_1), 1220 PINMUX_IPSR_MSEL(IP9_27_26, VI1_2_B, SEL_VIN1_1),
1221 PINMUX_IPSR_MODSEL_DATA(IP9_27_26, LCD_DATA13_B, SEL_LCDC_1), 1221 PINMUX_IPSR_MSEL(IP9_27_26, LCD_DATA13_B, SEL_LCDC_1),
1222 1222
1223 PINMUX_IPSR_MODSEL_DATA(IP9_29_28, SSI_SDATA1_A, SEL_SSI1_0), 1223 PINMUX_IPSR_MSEL(IP9_29_28, SSI_SDATA1_A, SEL_SSI1_0),
1224 PINMUX_IPSR_MODSEL_DATA(IP9_29_28, VI1_3_B, SEL_VIN1_1), 1224 PINMUX_IPSR_MSEL(IP9_29_28, VI1_3_B, SEL_VIN1_1),
1225 PINMUX_IPSR_MODSEL_DATA(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1), 1225 PINMUX_IPSR_MSEL(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1),
1226 1226
1227 /* IPSR10 */ 1227 /* IPSR10 */
1228 PINMUX_IPSR_DATA(IP10_2_0, SSI_SCK23), 1228 PINMUX_IPSR_DATA(IP10_2_0, SSI_SCK23),
1229 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, VI1_4_B, SEL_VIN1_1), 1229 PINMUX_IPSR_MSEL(IP10_2_0, VI1_4_B, SEL_VIN1_1),
1230 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, RX1_D, SEL_SCIF1_3), 1230 PINMUX_IPSR_MSEL(IP10_2_0, RX1_D, SEL_SCIF1_3),
1231 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, FCLE_B, SEL_FLCTL_1), 1231 PINMUX_IPSR_MSEL(IP10_2_0, FCLE_B, SEL_FLCTL_1),
1232 PINMUX_IPSR_MODSEL_DATA(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1), 1232 PINMUX_IPSR_MSEL(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1),
1233 1233
1234 PINMUX_IPSR_DATA(IP10_5_3, SSI_WS23), 1234 PINMUX_IPSR_DATA(IP10_5_3, SSI_WS23),
1235 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, VI1_5_B, SEL_VIN1_1), 1235 PINMUX_IPSR_MSEL(IP10_5_3, VI1_5_B, SEL_VIN1_1),
1236 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, TX1_D, SEL_SCIF1_3), 1236 PINMUX_IPSR_MSEL(IP10_5_3, TX1_D, SEL_SCIF1_3),
1237 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, HSCK0_C, SEL_HSCIF_2), 1237 PINMUX_IPSR_MSEL(IP10_5_3, HSCK0_C, SEL_HSCIF_2),
1238 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, FALE_B, SEL_FLCTL_1), 1238 PINMUX_IPSR_MSEL(IP10_5_3, FALE_B, SEL_FLCTL_1),
1239 PINMUX_IPSR_MODSEL_DATA(IP10_5_3, LCD_DON_B, SEL_LCDC_1), 1239 PINMUX_IPSR_MSEL(IP10_5_3, LCD_DON_B, SEL_LCDC_1),
1240 1240
1241 PINMUX_IPSR_DATA(IP10_8_6, SSI_SDATA2), 1241 PINMUX_IPSR_DATA(IP10_8_6, SSI_SDATA2),
1242 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, VI1_6_B, SEL_VIN1_1), 1242 PINMUX_IPSR_MSEL(IP10_8_6, VI1_6_B, SEL_VIN1_1),
1243 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, HRX0_C, SEL_HSCIF_2), 1243 PINMUX_IPSR_MSEL(IP10_8_6, HRX0_C, SEL_HSCIF_2),
1244 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, FRE_B, SEL_FLCTL_1), 1244 PINMUX_IPSR_MSEL(IP10_8_6, FRE_B, SEL_FLCTL_1),
1245 PINMUX_IPSR_MODSEL_DATA(IP10_8_6, LCD_CL1_B, SEL_LCDC_1), 1245 PINMUX_IPSR_MSEL(IP10_8_6, LCD_CL1_B, SEL_LCDC_1),
1246 1246
1247 PINMUX_IPSR_DATA(IP10_11_9, SSI_SDATA3), 1247 PINMUX_IPSR_DATA(IP10_11_9, SSI_SDATA3),
1248 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, VI1_7_B, SEL_VIN1_1), 1248 PINMUX_IPSR_MSEL(IP10_11_9, VI1_7_B, SEL_VIN1_1),
1249 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, HTX0_C, SEL_HSCIF_2), 1249 PINMUX_IPSR_MSEL(IP10_11_9, HTX0_C, SEL_HSCIF_2),
1250 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, FWE_B, SEL_FLCTL_1), 1250 PINMUX_IPSR_MSEL(IP10_11_9, FWE_B, SEL_FLCTL_1),
1251 PINMUX_IPSR_MODSEL_DATA(IP10_11_9, LCD_CL2_B, SEL_LCDC_1), 1251 PINMUX_IPSR_MSEL(IP10_11_9, LCD_CL2_B, SEL_LCDC_1),
1252 1252
1253 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, AUDIO_CLKA_A, SEL_AUDIO_CLKA_0), 1253 PINMUX_IPSR_MSEL(IP10_14_12, AUDIO_CLKA_A, SEL_AUDIO_CLKA_0),
1254 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, VI1_CLK_B, SEL_VIN1_1), 1254 PINMUX_IPSR_MSEL(IP10_14_12, VI1_CLK_B, SEL_VIN1_1),
1255 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, SCK1_D, SEL_SCIF1_3), 1255 PINMUX_IPSR_MSEL(IP10_14_12, SCK1_D, SEL_SCIF1_3),
1256 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, IECLK_B, SEL_IEBUS_1), 1256 PINMUX_IPSR_MSEL(IP10_14_12, IECLK_B, SEL_IEBUS_1),
1257 PINMUX_IPSR_MODSEL_DATA(IP10_14_12, LCD_FLM_B, SEL_LCDC_1), 1257 PINMUX_IPSR_MSEL(IP10_14_12, LCD_FLM_B, SEL_LCDC_1),
1258 1258
1259 PINMUX_IPSR_MODSEL_DATA(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0), 1259 PINMUX_IPSR_MSEL(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0),
1260 PINMUX_IPSR_MODSEL_DATA(IP10_15, LCD_CLK_B, SEL_LCDC_1), 1260 PINMUX_IPSR_MSEL(IP10_15, LCD_CLK_B, SEL_LCDC_1),
1261 1261
1262 PINMUX_IPSR_DATA(IP10_18_16, AUDIO_CLKC), 1262 PINMUX_IPSR_DATA(IP10_18_16, AUDIO_CLKC),
1263 PINMUX_IPSR_MODSEL_DATA(IP10_18_16, SCK1_E, SEL_SCIF1_4), 1263 PINMUX_IPSR_MSEL(IP10_18_16, SCK1_E, SEL_SCIF1_4),
1264 PINMUX_IPSR_MODSEL_DATA(IP10_18_16, HCTS0_C, SEL_HSCIF_2), 1264 PINMUX_IPSR_MSEL(IP10_18_16, HCTS0_C, SEL_HSCIF_2),
1265 PINMUX_IPSR_MODSEL_DATA(IP10_18_16, FRB_B, SEL_FLCTL_1), 1265 PINMUX_IPSR_MSEL(IP10_18_16, FRB_B, SEL_FLCTL_1),
1266 PINMUX_IPSR_MODSEL_DATA(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1), 1266 PINMUX_IPSR_MSEL(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1),
1267 1267
1268 PINMUX_IPSR_DATA(IP10_21_19, AUDIO_CLKOUT), 1268 PINMUX_IPSR_DATA(IP10_21_19, AUDIO_CLKOUT),
1269 PINMUX_IPSR_MODSEL_DATA(IP10_21_19, TX1_E, SEL_SCIF1_4), 1269 PINMUX_IPSR_MSEL(IP10_21_19, TX1_E, SEL_SCIF1_4),
1270 PINMUX_IPSR_MODSEL_DATA(IP10_21_19, HRTS0_C, SEL_HSCIF_2), 1270 PINMUX_IPSR_MSEL(IP10_21_19, HRTS0_C, SEL_HSCIF_2),
1271 PINMUX_IPSR_MODSEL_DATA(IP10_21_19, FSE_B, SEL_FLCTL_1), 1271 PINMUX_IPSR_MSEL(IP10_21_19, FSE_B, SEL_FLCTL_1),
1272 PINMUX_IPSR_MODSEL_DATA(IP10_21_19, LCD_M_DISP_B, SEL_LCDC_1), 1272 PINMUX_IPSR_MSEL(IP10_21_19, LCD_M_DISP_B, SEL_LCDC_1),
1273 1273
1274 PINMUX_IPSR_MODSEL_DATA(IP10_22, CAN_CLK_A, SEL_RCAN_CLK_0), 1274 PINMUX_IPSR_MSEL(IP10_22, CAN_CLK_A, SEL_RCAN_CLK_0),
1275 PINMUX_IPSR_MODSEL_DATA(IP10_22, RX4_D, SEL_SCIF4_3), 1275 PINMUX_IPSR_MSEL(IP10_22, RX4_D, SEL_SCIF4_3),
1276 1276
1277 PINMUX_IPSR_MODSEL_DATA(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0), 1277 PINMUX_IPSR_MSEL(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0),
1278 PINMUX_IPSR_MODSEL_DATA(IP10_24_23, TX4_D, SEL_SCIF4_3), 1278 PINMUX_IPSR_MSEL(IP10_24_23, TX4_D, SEL_SCIF4_3),
1279 PINMUX_IPSR_DATA(IP10_24_23, MLB_CLK), 1279 PINMUX_IPSR_DATA(IP10_24_23, MLB_CLK),
1280 1280
1281 PINMUX_IPSR_MODSEL_DATA(IP10_25, CAN1_RX_A, SEL_RCAN1_0), 1281 PINMUX_IPSR_MSEL(IP10_25, CAN1_RX_A, SEL_RCAN1_0),
1282 PINMUX_IPSR_MODSEL_DATA(IP10_25, IRQ1_B, SEL_INTC_1), 1282 PINMUX_IPSR_MSEL(IP10_25, IRQ1_B, SEL_INTC_1),
1283 1283
1284 PINMUX_IPSR_MODSEL_DATA(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0), 1284 PINMUX_IPSR_MSEL(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0),
1285 PINMUX_IPSR_MODSEL_DATA(IP10_27_26, IRQ0_B, SEL_INTC_1), 1285 PINMUX_IPSR_MSEL(IP10_27_26, IRQ0_B, SEL_INTC_1),
1286 PINMUX_IPSR_DATA(IP10_27_26, MLB_SIG), 1286 PINMUX_IPSR_DATA(IP10_27_26, MLB_SIG),
1287 1287
1288 PINMUX_IPSR_MODSEL_DATA(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0), 1288 PINMUX_IPSR_MSEL(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0),
1289 PINMUX_IPSR_MODSEL_DATA(IP10_29_28, TX5_C, SEL_SCIF1_2), 1289 PINMUX_IPSR_MSEL(IP10_29_28, TX5_C, SEL_SCIF1_2),
1290 PINMUX_IPSR_DATA(IP10_29_28, MLB_DAT), 1290 PINMUX_IPSR_DATA(IP10_29_28, MLB_DAT),
1291 1291
1292 /* IPSR11 */ 1292 /* IPSR11 */
1293 PINMUX_IPSR_DATA(IP11_0, SCL1), 1293 PINMUX_IPSR_DATA(IP11_0, SCL1),
1294 PINMUX_IPSR_MODSEL_DATA(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2), 1294 PINMUX_IPSR_MSEL(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2),
1295 1295
1296 PINMUX_IPSR_DATA(IP11_1, SDA1), 1296 PINMUX_IPSR_DATA(IP11_1, SDA1),
1297 PINMUX_IPSR_MODSEL_DATA(IP11_0, RX1_E, SEL_SCIF1_4), 1297 PINMUX_IPSR_MSEL(IP11_0, RX1_E, SEL_SCIF1_4),
1298 1298
1299 PINMUX_IPSR_DATA(IP11_2, SDA0), 1299 PINMUX_IPSR_DATA(IP11_2, SDA0),
1300 PINMUX_IPSR_MODSEL_DATA(IP11_2, HIFEBL_A, SEL_HIF_0), 1300 PINMUX_IPSR_MSEL(IP11_2, HIFEBL_A, SEL_HIF_0),
1301 1301
1302 PINMUX_IPSR_DATA(IP11_3, SDSELF), 1302 PINMUX_IPSR_DATA(IP11_3, SDSELF),
1303 PINMUX_IPSR_MODSEL_DATA(IP11_3, RTS1_E, SEL_SCIF1_3), 1303 PINMUX_IPSR_MSEL(IP11_3, RTS1_E, SEL_SCIF1_3),
1304 1304
1305 PINMUX_IPSR_MODSEL_DATA(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0), 1305 PINMUX_IPSR_MSEL(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0),
1306 PINMUX_IPSR_MODSEL_DATA(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0), 1306 PINMUX_IPSR_MSEL(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0),
1307 PINMUX_IPSR_DATA(IP11_6_4, VI0_CLK), 1307 PINMUX_IPSR_DATA(IP11_6_4, VI0_CLK),
1308 PINMUX_IPSR_MODSEL_DATA(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0), 1308 PINMUX_IPSR_MSEL(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0),
1309 PINMUX_IPSR_DATA(IP11_6_4, ET0_ERXD4), 1309 PINMUX_IPSR_DATA(IP11_6_4, ET0_ERXD4),
1310 1310
1311 PINMUX_IPSR_MODSEL_DATA(IP11_9_7, SCK0_A, SEL_SCIF0_0), 1311 PINMUX_IPSR_MSEL(IP11_9_7, SCK0_A, SEL_SCIF0_0),
1312 PINMUX_IPSR_MODSEL_DATA(IP11_9_7, HSPI_CS_A, SEL_HSPI_0), 1312 PINMUX_IPSR_MSEL(IP11_9_7, HSPI_CS_A, SEL_HSPI_0),
1313 PINMUX_IPSR_DATA(IP11_9_7, VI0_CLKENB), 1313 PINMUX_IPSR_DATA(IP11_9_7, VI0_CLKENB),
1314 PINMUX_IPSR_MODSEL_DATA(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0), 1314 PINMUX_IPSR_MSEL(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0),
1315 PINMUX_IPSR_DATA(IP11_9_7, ET0_ERXD5), 1315 PINMUX_IPSR_DATA(IP11_9_7, ET0_ERXD5),
1316 1316
1317 PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RX0_A, SEL_SCIF0_0), 1317 PINMUX_IPSR_MSEL(IP11_11_10, RX0_A, SEL_SCIF0_0),
1318 PINMUX_IPSR_MODSEL_DATA(IP11_11_10, HSPI_RX_A, SEL_HSPI_0), 1318 PINMUX_IPSR_MSEL(IP11_11_10, HSPI_RX_A, SEL_HSPI_0),
1319 PINMUX_IPSR_MODSEL_DATA(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0), 1319 PINMUX_IPSR_MSEL(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0),
1320 PINMUX_IPSR_DATA(IP11_11_10, ET0_ERXD6), 1320 PINMUX_IPSR_DATA(IP11_11_10, ET0_ERXD6),
1321 1321
1322 PINMUX_IPSR_MODSEL_DATA(IP11_12, TX0_A, SEL_SCIF0_0), 1322 PINMUX_IPSR_MSEL(IP11_12, TX0_A, SEL_SCIF0_0),
1323 PINMUX_IPSR_MODSEL_DATA(IP11_12, HSPI_TX_A, SEL_HSPI_0), 1323 PINMUX_IPSR_MSEL(IP11_12, HSPI_TX_A, SEL_HSPI_0),
1324 1324
1325 PINMUX_IPSR_DATA(IP11_15_13, PENC1), 1325 PINMUX_IPSR_DATA(IP11_15_13, PENC1),
1326 PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX3_D, SEL_SCIF3_3), 1326 PINMUX_IPSR_MSEL(IP11_15_13, TX3_D, SEL_SCIF3_3),
1327 PINMUX_IPSR_MODSEL_DATA(IP11_15_13, CAN1_TX_B, SEL_RCAN1_1), 1327 PINMUX_IPSR_MSEL(IP11_15_13, CAN1_TX_B, SEL_RCAN1_1),
1328 PINMUX_IPSR_MODSEL_DATA(IP11_15_13, TX5_D, SEL_SCIF5_3), 1328 PINMUX_IPSR_MSEL(IP11_15_13, TX5_D, SEL_SCIF5_3),
1329 PINMUX_IPSR_MODSEL_DATA(IP11_15_13, IETX_B, SEL_IEBUS_1), 1329 PINMUX_IPSR_MSEL(IP11_15_13, IETX_B, SEL_IEBUS_1),
1330 1330
1331 PINMUX_IPSR_DATA(IP11_18_16, USB_OVC1), 1331 PINMUX_IPSR_DATA(IP11_18_16, USB_OVC1),
1332 PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX3_D, SEL_SCIF3_3), 1332 PINMUX_IPSR_MSEL(IP11_18_16, RX3_D, SEL_SCIF3_3),
1333 PINMUX_IPSR_MODSEL_DATA(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1), 1333 PINMUX_IPSR_MSEL(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1),
1334 PINMUX_IPSR_MODSEL_DATA(IP11_18_16, RX5_D, SEL_SCIF5_3), 1334 PINMUX_IPSR_MSEL(IP11_18_16, RX5_D, SEL_SCIF5_3),
1335 PINMUX_IPSR_MODSEL_DATA(IP11_18_16, IERX_B, SEL_IEBUS_1), 1335 PINMUX_IPSR_MSEL(IP11_18_16, IERX_B, SEL_IEBUS_1),
1336 1336
1337 PINMUX_IPSR_DATA(IP11_20_19, DREQ0), 1337 PINMUX_IPSR_DATA(IP11_20_19, DREQ0),
1338 PINMUX_IPSR_MODSEL_DATA(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0), 1338 PINMUX_IPSR_MSEL(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0),
1339 PINMUX_IPSR_DATA(IP11_20_19, ET0_TX_EN), 1339 PINMUX_IPSR_DATA(IP11_20_19, ET0_TX_EN),
1340 1340
1341 PINMUX_IPSR_DATA(IP11_22_21, DACK0), 1341 PINMUX_IPSR_DATA(IP11_22_21, DACK0),
1342 PINMUX_IPSR_MODSEL_DATA(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0), 1342 PINMUX_IPSR_MSEL(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0),
1343 PINMUX_IPSR_DATA(IP11_22_21, ET0_TX_ER), 1343 PINMUX_IPSR_DATA(IP11_22_21, ET0_TX_ER),
1344 1344
1345 PINMUX_IPSR_DATA(IP11_25_23, DREQ1), 1345 PINMUX_IPSR_DATA(IP11_25_23, DREQ1),
1346 PINMUX_IPSR_MODSEL_DATA(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1), 1346 PINMUX_IPSR_MSEL(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1),
1347 PINMUX_IPSR_MODSEL_DATA(IP11_25_23, RX4_B, SEL_SCIF4_1), 1347 PINMUX_IPSR_MSEL(IP11_25_23, RX4_B, SEL_SCIF4_1),
1348 PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_0), 1348 PINMUX_IPSR_MSEL(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_0),
1349 PINMUX_IPSR_MODSEL_DATA(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0), 1349 PINMUX_IPSR_MSEL(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0),
1350 1350
1351 PINMUX_IPSR_DATA(IP11_27_26, DACK1), 1351 PINMUX_IPSR_DATA(IP11_27_26, DACK1),
1352 PINMUX_IPSR_MODSEL_DATA(IP11_27_26, HSPI_CS_B, SEL_HSPI_1), 1352 PINMUX_IPSR_MSEL(IP11_27_26, HSPI_CS_B, SEL_HSPI_1),
1353 PINMUX_IPSR_MODSEL_DATA(IP11_27_26, TX4_B, SEL_SCIF3_1), 1353 PINMUX_IPSR_MSEL(IP11_27_26, TX4_B, SEL_SCIF3_1),
1354 PINMUX_IPSR_MODSEL_DATA(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0), 1354 PINMUX_IPSR_MSEL(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0),
1355 1355
1356 PINMUX_IPSR_DATA(IP11_28, PRESETOUT), 1356 PINMUX_IPSR_DATA(IP11_28, PRESETOUT),
1357 PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT), 1357 PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
@@ -2445,6 +2445,6 @@ const struct sh_pfc_soc_info sh7734_pinmux_info = {
2445 .cfg_regs = pinmux_config_regs, 2445 .cfg_regs = pinmux_config_regs,
2446 .data_regs = pinmux_data_regs, 2446 .data_regs = pinmux_data_regs,
2447 2447
2448 .gpio_data = pinmux_data, 2448 .pinmux_data = pinmux_data,
2449 .gpio_data_size = ARRAY_SIZE(pinmux_data), 2449 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
2450}; 2450};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
index 625661a88c52..0555a1fe076e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -2238,6 +2238,6 @@ const struct sh_pfc_soc_info sh7757_pinmux_info = {
2238 .cfg_regs = pinmux_config_regs, 2238 .cfg_regs = pinmux_config_regs,
2239 .data_regs = pinmux_data_regs, 2239 .data_regs = pinmux_data_regs,
2240 2240
2241 .gpio_data = pinmux_data, 2241 .pinmux_data = pinmux_data,
2242 .gpio_data_size = ARRAY_SIZE(pinmux_data), 2242 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
2243}; 2243};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
index b38dd7e3e375..1934cbec3965 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7785.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -1269,6 +1269,6 @@ const struct sh_pfc_soc_info sh7785_pinmux_info = {
1269 .cfg_regs = pinmux_config_regs, 1269 .cfg_regs = pinmux_config_regs,
1270 .data_regs = pinmux_data_regs, 1270 .data_regs = pinmux_data_regs,
1271 1271
1272 .gpio_data = pinmux_data, 1272 .pinmux_data = pinmux_data,
1273 .gpio_data_size = ARRAY_SIZE(pinmux_data), 1273 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
1274}; 1274};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
index 6cb4e0aaf20b..c98585d80de8 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7786.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -813,6 +813,6 @@ const struct sh_pfc_soc_info sh7786_pinmux_info = {
813 .cfg_regs = pinmux_config_regs, 813 .cfg_regs = pinmux_config_regs,
814 .data_regs = pinmux_data_regs, 814 .data_regs = pinmux_data_regs,
815 815
816 .gpio_data = pinmux_data, 816 .pinmux_data = pinmux_data,
817 .gpio_data_size = ARRAY_SIZE(pinmux_data), 817 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
818}; 818};
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
index a3fcb2284d91..3f60c900645e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-shx3.c
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -554,8 +554,8 @@ const struct sh_pfc_soc_info shx3_pinmux_info = {
554 .nr_pins = ARRAY_SIZE(pinmux_pins), 554 .nr_pins = ARRAY_SIZE(pinmux_pins),
555 .func_gpios = pinmux_func_gpios, 555 .func_gpios = pinmux_func_gpios,
556 .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios), 556 .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
557 .gpio_data = pinmux_data, 557 .pinmux_data = pinmux_data,
558 .gpio_data_size = ARRAY_SIZE(pinmux_data), 558 .pinmux_data_size = ARRAY_SIZE(pinmux_data),
559 .cfg_regs = pinmux_config_regs, 559 .cfg_regs = pinmux_config_regs,
560 .data_regs = pinmux_data_regs, 560 .data_regs = pinmux_data_regs,
561}; 561};
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 15afd49fd4e3..7b373d43d981 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -52,6 +52,29 @@ struct sh_pfc_pin_group {
52 unsigned int nr_pins; 52 unsigned int nr_pins;
53}; 53};
54 54
55/*
56 * Using union vin_data saves memory occupied by the VIN data pins.
57 * VIN_DATA_PIN_GROUP() is a macro used to describe the VIN pin groups
58 * in this case.
59 */
60#define VIN_DATA_PIN_GROUP(n, s) \
61 { \
62 .name = #n#s, \
63 .pins = n##_pins.data##s, \
64 .mux = n##_mux.data##s, \
65 .nr_pins = ARRAY_SIZE(n##_pins.data##s), \
66 }
67
68union vin_data {
69 unsigned int data24[24];
70 unsigned int data20[20];
71 unsigned int data16[16];
72 unsigned int data12[12];
73 unsigned int data10[10];
74 unsigned int data8[8];
75 unsigned int data4[4];
76};
77
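A minimal sketch (not part of this patch; the vin0_data tables and the VI0_DATA*_MARK names are hypothetical) of how a driver combines union vin_data with VIN_DATA_PIN_GROUP(): one union-backed pin/mux table serves every bus-width variant of the group.

static const union vin_data vin0_data_pins = {
	.data24 = {
		/* hypothetical GPIO numbers for VI0 data lines 0..23 */
		RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 2),
		/* ... remaining entries elided ... */
	},
};

static const union vin_data vin0_data_mux = {
	.data24 = {
		/* hypothetical mux marks matching the pins above */
		VI0_DATA0_MARK, VI0_DATA1_MARK, VI0_DATA2_MARK,
		/* ... */
	},
};

/*
 * VIN_DATA_PIN_GROUP(vin0_data, 8) then expands to:
 *   { .name = "vin0_data8", .pins = vin0_data_pins.data8,
 *     .mux = vin0_data_mux.data8,
 *     .nr_pins = ARRAY_SIZE(vin0_data_pins.data8) }
 * so the 8-, 16- and 24-bit groups all alias the same storage.
 */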
55#define SH_PFC_FUNCTION(n) \ 78#define SH_PFC_FUNCTION(n) \
56 { \ 79 { \
57 .name = #n, \ 80 .name = #n, \
@@ -98,17 +121,11 @@ struct pinmux_data_reg {
98 .enum_ids = (const u16 [r_width]) \ 121 .enum_ids = (const u16 [r_width]) \
99 122
100struct pinmux_irq { 123struct pinmux_irq {
101 int irq;
102 const short *gpios; 124 const short *gpios;
103}; 125};
104 126
105#ifdef CONFIG_ARCH_MULTIPLATFORM 127#define PINMUX_IRQ(ids...) \
106#define PINMUX_IRQ(irq_nr, ids...) \
107 { .gpios = (const short []) { ids, -1 } } 128 { .gpios = (const short []) { ids, -1 } }
108#else
109#define PINMUX_IRQ(irq_nr, ids...) \
110 { .irq = irq_nr, .gpios = (const short []) { ids, -1 } }
111#endif
112 129
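A hedged sketch of a call site once the external IRQ number argument is gone (the GPIO numbers below are made up): only the list of candidate GPIOs remains, terminated internally with -1 by the macro.

static const struct pinmux_irq pinmux_irqs[] = {
	PINMUX_IRQ(RCAR_GP_PIN(1, 25)),			/* IRQ0, hypothetical pin */
	PINMUX_IRQ(RCAR_GP_PIN(1, 27), RCAR_GP_PIN(2, 3)),	/* IRQ1, two candidate pins */
};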
113struct pinmux_range { 130struct pinmux_range {
114 u16 begin; 131 u16 begin;
@@ -143,14 +160,16 @@ struct sh_pfc_soc_info {
143 const struct sh_pfc_function *functions; 160 const struct sh_pfc_function *functions;
144 unsigned int nr_functions; 161 unsigned int nr_functions;
145 162
163#ifdef CONFIG_SUPERH
146 const struct pinmux_func *func_gpios; 164 const struct pinmux_func *func_gpios;
147 unsigned int nr_func_gpios; 165 unsigned int nr_func_gpios;
166#endif
148 167
149 const struct pinmux_cfg_reg *cfg_regs; 168 const struct pinmux_cfg_reg *cfg_regs;
150 const struct pinmux_data_reg *data_regs; 169 const struct pinmux_data_reg *data_regs;
151 170
152 const u16 *gpio_data; 171 const u16 *pinmux_data;
153 unsigned int gpio_data_size; 172 unsigned int pinmux_data_size;
154 173
155 const struct pinmux_irq *gpio_irq; 174 const struct pinmux_irq *gpio_irq;
156 unsigned int gpio_irq_size; 175 unsigned int gpio_irq_size;
@@ -163,7 +182,7 @@ struct sh_pfc_soc_info {
163 */ 182 */
164 183
165/* 184/*
166 * sh_pfc_soc_info gpio_data array macros 185 * sh_pfc_soc_info pinmux_data array macros
167 */ 186 */
168 187
169#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0 188#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
@@ -177,33 +196,33 @@ struct sh_pfc_soc_info {
177#define PINMUX_IPSR_NOFN(ipsr, fn, ms) \ 196#define PINMUX_IPSR_NOFN(ipsr, fn, ms) \
178 PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##ms) 197 PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##ms)
179#define PINMUX_IPSR_MSEL(ipsr, fn, ms) \ 198#define PINMUX_IPSR_MSEL(ipsr, fn, ms) \
180 PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr, FN_##ms)
181#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) \
182 PINMUX_DATA(fn##_MARK, FN_##ms, FN_##ipsr, FN_##fn) 199 PINMUX_DATA(fn##_MARK, FN_##ms, FN_##ipsr, FN_##fn)
183 200
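As a worked expansion of the consolidated macro, taking one of the entries converted earlier in this diff:

/*
 * PINMUX_IPSR_MSEL(IP5_26_25, CTS1_E, SEL_SCIF1_3)
 *   -> PINMUX_DATA(CTS1_E_MARK, FN_SEL_SCIF1_3, FN_IP5_26_25, FN_CTS1_E)
 *   -> CTS1_E_MARK, FN_SEL_SCIF1_3, FN_IP5_26_25, FN_CTS1_E, 0
 * The body is the one PINMUX_IPSR_MODSEL_DATA used to have, so the
 * generated pinmux_data[] contents are unchanged by the rename.
 */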
184/* 201/*
185 * GP port style (32 ports banks) 202 * GP port style (32 ports banks)
186 */ 203 */
187 204
188#define PORT_GP_1(bank, pin, fn, sfx) fn(bank, pin, GP_##bank##_##pin, sfx) 205#define PORT_GP_CFG_1(bank, pin, fn, sfx, cfg) fn(bank, pin, GP_##bank##_##pin, sfx, cfg)
189 206#define PORT_GP_1(bank, pin, fn, sfx) PORT_GP_CFG_1(bank, pin, fn, sfx, 0)
190#define PORT_GP_32(bank, fn, sfx) \ 207
191 PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \ 208#define PORT_GP_CFG_32(bank, fn, sfx, cfg) \
192 PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \ 209 PORT_GP_CFG_1(bank, 0, fn, sfx, cfg), PORT_GP_CFG_1(bank, 1, fn, sfx, cfg), \
193 PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \ 210 PORT_GP_CFG_1(bank, 2, fn, sfx, cfg), PORT_GP_CFG_1(bank, 3, fn, sfx, cfg), \
194 PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \ 211 PORT_GP_CFG_1(bank, 4, fn, sfx, cfg), PORT_GP_CFG_1(bank, 5, fn, sfx, cfg), \
195 PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \ 212 PORT_GP_CFG_1(bank, 6, fn, sfx, cfg), PORT_GP_CFG_1(bank, 7, fn, sfx, cfg), \
196 PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \ 213 PORT_GP_CFG_1(bank, 8, fn, sfx, cfg), PORT_GP_CFG_1(bank, 9, fn, sfx, cfg), \
197 PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \ 214 PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), PORT_GP_CFG_1(bank, 11, fn, sfx, cfg), \
198 PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \ 215 PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), PORT_GP_CFG_1(bank, 13, fn, sfx, cfg), \
199 PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \ 216 PORT_GP_CFG_1(bank, 14, fn, sfx, cfg), PORT_GP_CFG_1(bank, 15, fn, sfx, cfg), \
200 PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \ 217 PORT_GP_CFG_1(bank, 16, fn, sfx, cfg), PORT_GP_CFG_1(bank, 17, fn, sfx, cfg), \
201 PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \ 218 PORT_GP_CFG_1(bank, 18, fn, sfx, cfg), PORT_GP_CFG_1(bank, 19, fn, sfx, cfg), \
202 PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \ 219 PORT_GP_CFG_1(bank, 20, fn, sfx, cfg), PORT_GP_CFG_1(bank, 21, fn, sfx, cfg), \
203 PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \ 220 PORT_GP_CFG_1(bank, 22, fn, sfx, cfg), PORT_GP_CFG_1(bank, 23, fn, sfx, cfg), \
204 PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \ 221 PORT_GP_CFG_1(bank, 24, fn, sfx, cfg), PORT_GP_CFG_1(bank, 25, fn, sfx, cfg), \
205 PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx), \ 222 PORT_GP_CFG_1(bank, 26, fn, sfx, cfg), PORT_GP_CFG_1(bank, 27, fn, sfx, cfg), \
206 PORT_GP_1(bank, 30, fn, sfx), PORT_GP_1(bank, 31, fn, sfx) 223 PORT_GP_CFG_1(bank, 28, fn, sfx, cfg), PORT_GP_CFG_1(bank, 29, fn, sfx, cfg), \
224 PORT_GP_CFG_1(bank, 30, fn, sfx, cfg), PORT_GP_CFG_1(bank, 31, fn, sfx, cfg)
225#define PORT_GP_32(bank, fn, sfx) PORT_GP_CFG_32(bank, fn, sfx, 0)
207 226
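A minimal sketch (not from this patch; the bank layout and the SH_PFC_PIN_CFG_IO_VOLTAGE flag are assumptions) of how a SoC driver can attach per-pin capabilities through the new cfg argument while untouched banks keep the default of 0:

/* SH_PFC_PIN_CFG_IO_VOLTAGE is assumed here as an example capability flag. */
#define CPU_ALL_PORT(fn, sfx)					\
	PORT_GP_32(0, fn, sfx),					\
	PORT_GP_CFG_32(1, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),	\
	PORT_GP_32(2, fn, sfx)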
208#define PORT_GP_32_REV(bank, fn, sfx) \ 227#define PORT_GP_32_REV(bank, fn, sfx) \
209 PORT_GP_1(bank, 31, fn, sfx), PORT_GP_1(bank, 30, fn, sfx), \ 228 PORT_GP_1(bank, 31, fn, sfx), PORT_GP_1(bank, 30, fn, sfx), \
@@ -224,20 +243,21 @@ struct sh_pfc_soc_info {
224 PORT_GP_1(bank, 1, fn, sfx), PORT_GP_1(bank, 0, fn, sfx) 243 PORT_GP_1(bank, 1, fn, sfx), PORT_GP_1(bank, 0, fn, sfx)
225 244
226/* GP_ALL(suffix) - Expand to a list of GP_#_#_suffix */ 245/* GP_ALL(suffix) - Expand to a list of GP_#_#_suffix */
227#define _GP_ALL(bank, pin, name, sfx) name##_##sfx 246#define _GP_ALL(bank, pin, name, sfx, cfg) name##_##sfx
228#define GP_ALL(str) CPU_ALL_PORT(_GP_ALL, str) 247#define GP_ALL(str) CPU_ALL_PORT(_GP_ALL, str)
229 248
230/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ 249/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
231#define _GP_GPIO(bank, _pin, _name, sfx) \ 250#define _GP_GPIO(bank, _pin, _name, sfx, cfg) \
232 { \ 251 { \
233 .pin = (bank * 32) + _pin, \ 252 .pin = (bank * 32) + _pin, \
234 .name = __stringify(_name), \ 253 .name = __stringify(_name), \
235 .enum_id = _name##_DATA, \ 254 .enum_id = _name##_DATA, \
255 .configs = cfg, \
236 } 256 }
237#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, unused) 257#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, unused)
238 258
239/* PINMUX_DATA_GP_ALL - Expand to a list of name_DATA, name_FN marks */ 259/* PINMUX_DATA_GP_ALL - Expand to a list of name_DATA, name_FN marks */
240#define _GP_DATA(bank, pin, name, sfx) PINMUX_DATA(name##_DATA, name##_FN) 260#define _GP_DATA(bank, pin, name, sfx, cfg) PINMUX_DATA(name##_DATA, name##_FN)
241#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, unused) 261#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, unused)
242 262
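Tracing one pin through the updated helpers (GP_1_5 and the flag are illustrative; the hunk implies struct sh_pfc_pin gained a matching configs member): _GP_GPIO(1, 5, GP_1_5, unused, SH_PFC_PIN_CFG_IO_VOLTAGE) expands to

/*
 * {
 *	.pin     = (1 * 32) + 5,		// 37
 *	.name    = "GP_1_5",
 *	.enum_id = GP_1_5_DATA,
 *	.configs = SH_PFC_PIN_CFG_IO_VOLTAGE,	// new field fed by cfg
 * }
 * while _GP_DATA() simply ignores cfg, so PINMUX_DATA_GP_ALL() is unchanged.
 */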
243/* 263/*
@@ -326,4 +346,9 @@ struct sh_pfc_soc_info {
326 } \ 346 } \
327 } 347 }
328 348
349/*
350 * GPIO number helper macro for R-Car
351 */
352#define RCAR_GP_PIN(bank, pin) (((bank) * 32) + (pin))
353
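The helper just linearizes (bank, pin) into the flat GPIO numbering used by the pin tables; a small hedged example with made-up pins:

/* RCAR_GP_PIN(3, 11) == (3 * 32) + 11 == 107 */
static const unsigned int scif0_data_pins[] = {
	RCAR_GP_PIN(4, 28),	/* hypothetical RX, GPIO 156 */
	RCAR_GP_PIN(4, 29),	/* hypothetical TX, GPIO 157 */
};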
329#endif /* __SH_PFC_H */ 354#endif /* __SH_PFC_H */
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 0d24d9e4b70c..829018c812bd 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -544,6 +544,11 @@ static const struct pinctrl_pin_desc atlas7_ioc_pads[] = {
544 PINCTRL_PIN(156, "lvds_tx0d1n"), 544 PINCTRL_PIN(156, "lvds_tx0d1n"),
545 PINCTRL_PIN(157, "lvds_tx0d0p"), 545 PINCTRL_PIN(157, "lvds_tx0d0p"),
546 PINCTRL_PIN(158, "lvds_tx0d0n"), 546 PINCTRL_PIN(158, "lvds_tx0d0n"),
547 PINCTRL_PIN(159, "jtag_tdo"),
548 PINCTRL_PIN(160, "jtag_tms"),
549 PINCTRL_PIN(161, "jtag_tck"),
550 PINCTRL_PIN(162, "jtag_tdi"),
551 PINCTRL_PIN(163, "jtag_trstn"),
547}; 552};
548 553
549struct atlas7_pad_config atlas7_ioc_pad_confs[] = { 554struct atlas7_pad_config atlas7_ioc_pad_confs[] = {
@@ -708,6 +713,11 @@ struct atlas7_pad_config atlas7_ioc_pad_confs[] = {
708 PADCONF(156, 7, 0x130, 0x270, -1, 0x480, 28, 14, 0, 7), 713 PADCONF(156, 7, 0x130, 0x270, -1, 0x480, 28, 14, 0, 7),
709 PADCONF(157, 7, 0x138, 0x278, -1, 0x480, 0, 0, 0, 8), 714 PADCONF(157, 7, 0x138, 0x278, -1, 0x480, 0, 0, 0, 8),
710 PADCONF(158, 7, 0x138, 0x278, -1, 0x480, 4, 2, 0, 9), 715 PADCONF(158, 7, 0x138, 0x278, -1, 0x480, 4, 2, 0, 9),
716 PADCONF(159, 5, 0x140, 0x280, 0x380, -1, 0, 0, 0, 0),
717 PADCONF(160, 6, 0x140, 0x280, 0x380, -1, 4, 2, 2, 0),
718 PADCONF(161, 5, 0x140, 0x280, 0x380, -1, 8, 4, 4, 0),
719 PADCONF(162, 6, 0x140, 0x280, 0x380, -1, 12, 6, 6, 0),
720 PADCONF(163, 6, 0x140, 0x280, 0x380, -1, 16, 8, 8, 0),
711}; 721};
712 722
713/* pin list of each pin group */ 723/* pin list of each pin group */
@@ -724,12 +734,15 @@ static const unsigned int sp_rgmii_gpio_pins[] = { 97, 98, 99, 100, 101, 102,
724 141, 142, 143, 144, 145, 146, 147, 148, }; 734 141, 142, 143, 144, 145, 146, 147, 148, };
725static const unsigned int lvds_gpio_pins[] = { 157, 158, 155, 156, 153, 154, 735static const unsigned int lvds_gpio_pins[] = { 157, 158, 155, 156, 153, 154,
726 151, 152, 149, 150, }; 736 151, 152, 149, 150, };
727static const unsigned int uart_nand_gpio_pins[] = { 44, 43, 42, 41, 40, 39, 737static const unsigned int jtag_uart_nand_gpio_pins[] = { 44, 43, 42, 41, 40,
728 38, 37, 46, 47, 48, 49, 50, 52, 51, 45, 133, 134, 135, 136, 738 39, 38, 37, 46, 47, 48, 49, 50, 52, 51, 45, 133, 134, 135,
729 137, 138, 139, 140, }; 739 136, 137, 138, 139, 140, 159, 160, 161, 162, 163, };
730static const unsigned int rtc_gpio_pins[] = { 0, 1, 2, 3, 4, 10, 11, 12, 13, 740static const unsigned int rtc_gpio_pins[] = { 0, 1, 2, 3, 4, 10, 11, 12, 13,
731 14, 15, 16, 17, }; 741 14, 15, 16, 17, 9, };
732static const unsigned int audio_ac97_pins[] = { 113, 118, 115, 114, }; 742static const unsigned int audio_ac97_pins[] = { 113, 118, 115, 114, };
743static const unsigned int audio_digmic_pins0[] = { 51, };
744static const unsigned int audio_digmic_pins1[] = { 122, };
745static const unsigned int audio_digmic_pins2[] = { 161, };
733static const unsigned int audio_func_dbg_pins[] = { 141, 144, 44, 43, 42, 41, 746static const unsigned int audio_func_dbg_pins[] = { 141, 144, 44, 43, 42, 41,
734 40, 39, 38, 37, 74, 75, 76, 77, 78, 79, 81, 113, 114, 118, 747 40, 39, 38, 37, 74, 75, 76, 77, 78, 79, 81, 113, 114, 118,
735 115, 49, 50, 142, 143, 80, }; 748 115, 49, 50, 142, 143, 80, };
@@ -737,16 +750,49 @@ static const unsigned int audio_i2s_pins[] = { 118, 115, 116, 117, 112, 113,
737 114, }; 750 114, };
738static const unsigned int audio_i2s_2ch_pins[] = { 118, 115, 112, 113, 114, }; 751static const unsigned int audio_i2s_2ch_pins[] = { 118, 115, 112, 113, 114, };
739static const unsigned int audio_i2s_extclk_pins[] = { 112, }; 752static const unsigned int audio_i2s_extclk_pins[] = { 112, };
740static const unsigned int audio_uart0_pins[] = { 143, 142, 141, 144, }; 753static const unsigned int audio_spdif_out_pins0[] = { 112, };
741static const unsigned int audio_uart1_pins[] = { 147, 146, 145, 148, }; 754static const unsigned int audio_spdif_out_pins1[] = { 116, };
742static const unsigned int audio_uart2_pins0[] = { 20, 21, 19, 18, }; 755static const unsigned int audio_spdif_out_pins2[] = { 142, };
743static const unsigned int audio_uart2_pins1[] = { 109, 110, 101, 111, }; 756static const unsigned int audio_uart0_basic_pins[] = { 143, 142, 141, 144, };
744static const unsigned int c_can_trnsvr_pins[] = { 1, }; 757static const unsigned int audio_uart0_urfs_pins0[] = { 117, };
745static const unsigned int c0_can_pins0[] = { 11, 10, }; 758static const unsigned int audio_uart0_urfs_pins1[] = { 139, };
746static const unsigned int c0_can_pins1[] = { 2, 3, }; 759static const unsigned int audio_uart0_urfs_pins2[] = { 163, };
747static const unsigned int c1_can_pins0[] = { 138, 137, }; 760static const unsigned int audio_uart0_urfs_pins3[] = { 162, };
748static const unsigned int c1_can_pins1[] = { 147, 146, }; 761static const unsigned int audio_uart1_basic_pins[] = { 147, 146, 145, 148, };
749static const unsigned int c1_can_pins2[] = { 2, 3, }; 762static const unsigned int audio_uart1_urfs_pins0[] = { 117, };
763static const unsigned int audio_uart1_urfs_pins1[] = { 140, };
764static const unsigned int audio_uart1_urfs_pins2[] = { 163, };
765static const unsigned int audio_uart2_urfs_pins0[] = { 139, };
766static const unsigned int audio_uart2_urfs_pins1[] = { 163, };
767static const unsigned int audio_uart2_urfs_pins2[] = { 96, };
768static const unsigned int audio_uart2_urxd_pins0[] = { 20, };
769static const unsigned int audio_uart2_urxd_pins1[] = { 109, };
770static const unsigned int audio_uart2_urxd_pins2[] = { 93, };
771static const unsigned int audio_uart2_usclk_pins0[] = { 19, };
772static const unsigned int audio_uart2_usclk_pins1[] = { 101, };
773static const unsigned int audio_uart2_usclk_pins2[] = { 91, };
774static const unsigned int audio_uart2_utfs_pins0[] = { 18, };
775static const unsigned int audio_uart2_utfs_pins1[] = { 111, };
776static const unsigned int audio_uart2_utfs_pins2[] = { 94, };
777static const unsigned int audio_uart2_utxd_pins0[] = { 21, };
778static const unsigned int audio_uart2_utxd_pins1[] = { 110, };
779static const unsigned int audio_uart2_utxd_pins2[] = { 92, };
780static const unsigned int c_can_trnsvr_en_pins0[] = { 2, };
781static const unsigned int c_can_trnsvr_en_pins1[] = { 0, };
782static const unsigned int c_can_trnsvr_intr_pins[] = { 1, };
783static const unsigned int c_can_trnsvr_stb_n_pins[] = { 3, };
784static const unsigned int c0_can_rxd_trnsv0_pins[] = { 11, };
785static const unsigned int c0_can_rxd_trnsv1_pins[] = { 2, };
786static const unsigned int c0_can_txd_trnsv0_pins[] = { 10, };
787static const unsigned int c0_can_txd_trnsv1_pins[] = { 3, };
788static const unsigned int c1_can_rxd_pins0[] = { 138, };
789static const unsigned int c1_can_rxd_pins1[] = { 147, };
790static const unsigned int c1_can_rxd_pins2[] = { 2, };
791static const unsigned int c1_can_rxd_pins3[] = { 162, };
792static const unsigned int c1_can_txd_pins0[] = { 137, };
793static const unsigned int c1_can_txd_pins1[] = { 146, };
794static const unsigned int c1_can_txd_pins2[] = { 3, };
795static const unsigned int c1_can_txd_pins3[] = { 161, };
750static const unsigned int ca_audio_lpc_pins[] = { 62, 63, 64, 65, 66, 67, 68, 796static const unsigned int ca_audio_lpc_pins[] = { 62, 63, 64, 65, 66, 67, 68,
751 69, 70, 71, }; 797 69, 70, 71, };
752static const unsigned int ca_bt_lpc_pins[] = { 85, 86, 87, 88, 89, 90, }; 798static const unsigned int ca_bt_lpc_pins[] = { 85, 86, 87, 88, 89, 90, };
@@ -804,7 +850,29 @@ static const unsigned int gn_trg_shutdown_pins2[] = { 117, };
804static const unsigned int gn_trg_shutdown_pins3[] = { 123, }; 850static const unsigned int gn_trg_shutdown_pins3[] = { 123, };
805static const unsigned int i2c0_pins[] = { 128, 127, }; 851static const unsigned int i2c0_pins[] = { 128, 127, };
806static const unsigned int i2c1_pins[] = { 126, 125, }; 852static const unsigned int i2c1_pins[] = { 126, 125, };
807static const unsigned int jtag_pins0[] = { 125, 4, 2, 0, 1, 3, }; 853static const unsigned int i2s0_pins[] = { 91, 93, 94, 92, };
854static const unsigned int i2s1_basic_pins[] = { 95, 96, };
855static const unsigned int i2s1_rxd0_pins0[] = { 61, };
856static const unsigned int i2s1_rxd0_pins1[] = { 131, };
857static const unsigned int i2s1_rxd0_pins2[] = { 129, };
858static const unsigned int i2s1_rxd0_pins3[] = { 117, };
859static const unsigned int i2s1_rxd0_pins4[] = { 83, };
860static const unsigned int i2s1_rxd1_pins0[] = { 72, };
861static const unsigned int i2s1_rxd1_pins1[] = { 132, };
862static const unsigned int i2s1_rxd1_pins2[] = { 130, };
863static const unsigned int i2s1_rxd1_pins3[] = { 118, };
864static const unsigned int i2s1_rxd1_pins4[] = { 84, };
865static const unsigned int jtag_jt_dbg_nsrst_pins[] = { 125, };
866static const unsigned int jtag_ntrst_pins0[] = { 4, };
867static const unsigned int jtag_ntrst_pins1[] = { 163, };
868static const unsigned int jtag_swdiotms_pins0[] = { 2, };
869static const unsigned int jtag_swdiotms_pins1[] = { 160, };
870static const unsigned int jtag_tck_pins0[] = { 0, };
871static const unsigned int jtag_tck_pins1[] = { 161, };
872static const unsigned int jtag_tdi_pins0[] = { 1, };
873static const unsigned int jtag_tdi_pins1[] = { 162, };
874static const unsigned int jtag_tdo_pins0[] = { 3, };
875static const unsigned int jtag_tdo_pins1[] = { 159, };
808static const unsigned int ks_kas_spi_pins0[] = { 141, 144, 143, 142, }; 876static const unsigned int ks_kas_spi_pins0[] = { 141, 144, 143, 142, };
809static const unsigned int ld_ldd_pins[] = { 57, 58, 59, 60, 61, 62, 63, 64, 877static const unsigned int ld_ldd_pins[] = { 57, 58, 59, 60, 61, 62, 63, 64,
810 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 78, 79, 80, 878 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 78, 79, 80,
@@ -821,7 +889,7 @@ static const unsigned int nd_df_pins[] = { 44, 43, 42, 41, 40, 39, 38, 37,
821 47, 46, 52, 51, 45, 49, 50, 48, 124, }; 889 47, 46, 52, 51, 45, 49, 50, 48, 124, };
822static const unsigned int nd_df_nowp_pins[] = { 44, 43, 42, 41, 40, 39, 38, 890static const unsigned int nd_df_nowp_pins[] = { 44, 43, 42, 41, 40, 39, 38,
823 37, 47, 46, 52, 51, 45, 49, 50, 48, }; 891 37, 47, 46, 52, 51, 45, 49, 50, 48, };
824static const unsigned int ps_pins[] = { 120, 119, }; 892static const unsigned int ps_pins[] = { 120, 119, 121, };
825static const unsigned int pwc_core_on_pins[] = { 8, }; 893static const unsigned int pwc_core_on_pins[] = { 8, };
826static const unsigned int pwc_ext_on_pins[] = { 6, }; 894static const unsigned int pwc_ext_on_pins[] = { 6, };
827static const unsigned int pwc_gpio3_clk_pins[] = { 3, }; 895static const unsigned int pwc_gpio3_clk_pins[] = { 3, };
@@ -836,18 +904,26 @@ static const unsigned int pwc_wakeup_src3_pins[] = { 3, };
836static const unsigned int pw_cko0_pins0[] = { 123, }; 904static const unsigned int pw_cko0_pins0[] = { 123, };
837static const unsigned int pw_cko0_pins1[] = { 101, }; 905static const unsigned int pw_cko0_pins1[] = { 101, };
838static const unsigned int pw_cko0_pins2[] = { 82, }; 906static const unsigned int pw_cko0_pins2[] = { 82, };
907static const unsigned int pw_cko0_pins3[] = { 162, };
839static const unsigned int pw_cko1_pins0[] = { 124, }; 908static const unsigned int pw_cko1_pins0[] = { 124, };
840static const unsigned int pw_cko1_pins1[] = { 110, }; 909static const unsigned int pw_cko1_pins1[] = { 110, };
910static const unsigned int pw_cko1_pins2[] = { 163, };
841static const unsigned int pw_i2s01_clk_pins0[] = { 125, }; 911static const unsigned int pw_i2s01_clk_pins0[] = { 125, };
842static const unsigned int pw_i2s01_clk_pins1[] = { 117, }; 912static const unsigned int pw_i2s01_clk_pins1[] = { 117, };
843static const unsigned int pw_pwm0_pins[] = { 119, }; 913static const unsigned int pw_i2s01_clk_pins2[] = { 132, };
844static const unsigned int pw_pwm1_pins[] = { 120, }; 914static const unsigned int pw_pwm0_pins0[] = { 119, };
915static const unsigned int pw_pwm0_pins1[] = { 159, };
916static const unsigned int pw_pwm1_pins0[] = { 120, };
917static const unsigned int pw_pwm1_pins1[] = { 160, };
918static const unsigned int pw_pwm1_pins2[] = { 131, };
845static const unsigned int pw_pwm2_pins0[] = { 121, }; 919static const unsigned int pw_pwm2_pins0[] = { 121, };
846static const unsigned int pw_pwm2_pins1[] = { 98, }; 920static const unsigned int pw_pwm2_pins1[] = { 98, };
921static const unsigned int pw_pwm2_pins2[] = { 161, };
847static const unsigned int pw_pwm3_pins0[] = { 122, }; 922static const unsigned int pw_pwm3_pins0[] = { 122, };
848static const unsigned int pw_pwm3_pins1[] = { 73, }; 923static const unsigned int pw_pwm3_pins1[] = { 73, };
849static const unsigned int pw_pwm_cpu_vol_pins0[] = { 121, }; 924static const unsigned int pw_pwm_cpu_vol_pins0[] = { 121, };
850static const unsigned int pw_pwm_cpu_vol_pins1[] = { 98, }; 925static const unsigned int pw_pwm_cpu_vol_pins1[] = { 98, };
926static const unsigned int pw_pwm_cpu_vol_pins2[] = { 161, };
851static const unsigned int pw_backlight_pins0[] = { 122, }; 927static const unsigned int pw_backlight_pins0[] = { 122, };
852static const unsigned int pw_backlight_pins1[] = { 73, }; 928static const unsigned int pw_backlight_pins1[] = { 73, };
853static const unsigned int rg_eth_mac_pins[] = { 108, 103, 104, 105, 106, 107, 929static const unsigned int rg_eth_mac_pins[] = { 108, 103, 104, 105, 106, 107,
@@ -863,8 +939,11 @@ static const unsigned int sd1_pins[] = { 48, 49, 44, 43, 42, 41, 40, 39, 38,
863 37, }; 939 37, };
864static const unsigned int sd1_4bit_pins0[] = { 48, 49, 44, 43, 42, 41, }; 940static const unsigned int sd1_4bit_pins0[] = { 48, 49, 44, 43, 42, 41, };
865static const unsigned int sd1_4bit_pins1[] = { 48, 49, 40, 39, 38, 37, }; 941static const unsigned int sd1_4bit_pins1[] = { 48, 49, 40, 39, 38, 37, };
866static const unsigned int sd2_pins0[] = { 124, 31, 32, 33, 34, 35, 36, 123, }; 942static const unsigned int sd2_basic_pins[] = { 31, 32, 33, 34, 35, 36, };
867static const unsigned int sd2_no_cdb_pins0[] = { 31, 32, 33, 34, 35, 36, 123, }; 943static const unsigned int sd2_cdb_pins0[] = { 124, };
944static const unsigned int sd2_cdb_pins1[] = { 161, };
945static const unsigned int sd2_wpb_pins0[] = { 123, };
946static const unsigned int sd2_wpb_pins1[] = { 163, };
868static const unsigned int sd3_pins[] = { 85, 86, 87, 88, 89, 90, }; 947static const unsigned int sd3_pins[] = { 85, 86, 87, 88, 89, 90, };
869static const unsigned int sd5_pins[] = { 91, 92, 93, 94, 95, 96, }; 948static const unsigned int sd5_pins[] = { 91, 92, 93, 94, 95, 96, };
870static const unsigned int sd6_pins0[] = { 79, 78, 74, 75, 76, 77, }; 949static const unsigned int sd6_pins0[] = { 79, 78, 74, 75, 76, 77, };
@@ -877,19 +956,39 @@ static const unsigned int tpiu_trace_pins[] = { 53, 56, 57, 58, 59, 60, 61,
877static const unsigned int uart0_pins[] = { 121, 120, 134, 133, }; 956static const unsigned int uart0_pins[] = { 121, 120, 134, 133, };
878static const unsigned int uart0_nopause_pins[] = { 134, 133, }; 957static const unsigned int uart0_nopause_pins[] = { 134, 133, };
879static const unsigned int uart1_pins[] = { 136, 135, }; 958static const unsigned int uart1_pins[] = { 136, 135, };
880static const unsigned int uart2_pins[] = { 11, 10, };
881static const unsigned int uart3_pins0[] = { 125, 126, 138, 137, };
882static const unsigned int uart3_pins1[] = { 111, 109, 84, 83, };
883static const unsigned int uart3_pins2[] = { 140, 139, 138, 137, };
884static const unsigned int uart3_pins3[] = { 139, 140, 84, 83, };
885static const unsigned int uart3_nopause_pins0[] = { 138, 137, };
886static const unsigned int uart3_nopause_pins1[] = { 84, 83, };
887static const unsigned int uart4_pins0[] = { 122, 123, 140, 139, };
888static const unsigned int uart4_pins1[] = { 100, 99, 140, 139, };
889static const unsigned int uart4_pins2[] = { 117, 116, 140, 139, };
890static const unsigned int uart4_nopause_pins[] = { 140, 139, };
891static const unsigned int usb0_drvvbus_pins[] = { 51, };
892static const unsigned int usb1_drvvbus_pins[] = { 134, };
959static const unsigned int uart2_cts_pins0[] = { 132, };
960static const unsigned int uart2_cts_pins1[] = { 162, };
961static const unsigned int uart2_rts_pins0[] = { 131, };
962static const unsigned int uart2_rts_pins1[] = { 161, };
963static const unsigned int uart2_rxd_pins0[] = { 11, };
964static const unsigned int uart2_rxd_pins1[] = { 160, };
965static const unsigned int uart2_rxd_pins2[] = { 130, };
966static const unsigned int uart2_txd_pins0[] = { 10, };
967static const unsigned int uart2_txd_pins1[] = { 159, };
968static const unsigned int uart2_txd_pins2[] = { 129, };
969static const unsigned int uart3_cts_pins0[] = { 125, };
970static const unsigned int uart3_cts_pins1[] = { 111, };
971static const unsigned int uart3_cts_pins2[] = { 140, };
972static const unsigned int uart3_rts_pins0[] = { 126, };
973static const unsigned int uart3_rts_pins1[] = { 109, };
974static const unsigned int uart3_rts_pins2[] = { 139, };
975static const unsigned int uart3_rxd_pins0[] = { 138, };
976static const unsigned int uart3_rxd_pins1[] = { 84, };
977static const unsigned int uart3_rxd_pins2[] = { 162, };
978static const unsigned int uart3_txd_pins0[] = { 137, };
979static const unsigned int uart3_txd_pins1[] = { 83, };
980static const unsigned int uart3_txd_pins2[] = { 161, };
981static const unsigned int uart4_basic_pins[] = { 140, 139, };
982static const unsigned int uart4_cts_pins0[] = { 122, };
983static const unsigned int uart4_cts_pins1[] = { 100, };
984static const unsigned int uart4_cts_pins2[] = { 117, };
985static const unsigned int uart4_rts_pins0[] = { 123, };
986static const unsigned int uart4_rts_pins1[] = { 99, };
987static const unsigned int uart4_rts_pins2[] = { 116, };
988static const unsigned int usb0_drvvbus_pins0[] = { 51, };
989static const unsigned int usb0_drvvbus_pins1[] = { 162, };
990static const unsigned int usb1_drvvbus_pins0[] = { 134, };
991static const unsigned int usb1_drvvbus_pins1[] = { 163, };
893static const unsigned int visbus_dout_pins[] = { 57, 58, 59, 60, 61, 62, 63, 992static const unsigned int visbus_dout_pins[] = { 57, 58, 59, 60, 61, 62, 63,
894 64, 65, 66, 67, 68, 69, 70, 71, 72, 53, 54, 55, 56, 85, 86, 993 64, 65, 66, 67, 68, 69, 70, 71, 72, 53, 54, 55, 56, 85, 86,
895 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, }; 994 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, };
@@ -910,23 +1009,59 @@ struct atlas7_pin_group altas7_pin_groups[] = {
910 GROUP("sdio_i2s_gpio_grp", sdio_i2s_gpio_pins), 1009 GROUP("sdio_i2s_gpio_grp", sdio_i2s_gpio_pins),
911 GROUP("sp_rgmii_gpio_grp", sp_rgmii_gpio_pins), 1010 GROUP("sp_rgmii_gpio_grp", sp_rgmii_gpio_pins),
912 GROUP("lvds_gpio_grp", lvds_gpio_pins), 1011 GROUP("lvds_gpio_grp", lvds_gpio_pins),
913 GROUP("uart_nand_gpio_grp", uart_nand_gpio_pins),
1012 GROUP("jtag_uart_nand_gpio_grp", jtag_uart_nand_gpio_pins),
914 GROUP("rtc_gpio_grp", rtc_gpio_pins), 1013 GROUP("rtc_gpio_grp", rtc_gpio_pins),
915 GROUP("audio_ac97_grp", audio_ac97_pins), 1014 GROUP("audio_ac97_grp", audio_ac97_pins),
1015 GROUP("audio_digmic_grp0", audio_digmic_pins0),
1016 GROUP("audio_digmic_grp1", audio_digmic_pins1),
1017 GROUP("audio_digmic_grp2", audio_digmic_pins2),
916 GROUP("audio_func_dbg_grp", audio_func_dbg_pins), 1018 GROUP("audio_func_dbg_grp", audio_func_dbg_pins),
917 GROUP("audio_i2s_grp", audio_i2s_pins), 1019 GROUP("audio_i2s_grp", audio_i2s_pins),
918 GROUP("audio_i2s_2ch_grp", audio_i2s_2ch_pins), 1020 GROUP("audio_i2s_2ch_grp", audio_i2s_2ch_pins),
919 GROUP("audio_i2s_extclk_grp", audio_i2s_extclk_pins), 1021 GROUP("audio_i2s_extclk_grp", audio_i2s_extclk_pins),
920 GROUP("audio_uart0_grp", audio_uart0_pins),
921 GROUP("audio_uart1_grp", audio_uart1_pins),
922 GROUP("audio_uart2_grp0", audio_uart2_pins0),
923 GROUP("audio_uart2_grp1", audio_uart2_pins1),
924 GROUP("c_can_trnsvr_grp", c_can_trnsvr_pins),
925 GROUP("c0_can_grp0", c0_can_pins0),
926 GROUP("c0_can_grp1", c0_can_pins1),
927 GROUP("c1_can_grp0", c1_can_pins0),
928 GROUP("c1_can_grp1", c1_can_pins1),
929 GROUP("c1_can_grp2", c1_can_pins2),
1022 GROUP("audio_spdif_out_grp0", audio_spdif_out_pins0),
1023 GROUP("audio_spdif_out_grp1", audio_spdif_out_pins1),
1024 GROUP("audio_spdif_out_grp2", audio_spdif_out_pins2),
1025 GROUP("audio_uart0_basic_grp", audio_uart0_basic_pins),
1026 GROUP("audio_uart0_urfs_grp0", audio_uart0_urfs_pins0),
1027 GROUP("audio_uart0_urfs_grp1", audio_uart0_urfs_pins1),
1028 GROUP("audio_uart0_urfs_grp2", audio_uart0_urfs_pins2),
1029 GROUP("audio_uart0_urfs_grp3", audio_uart0_urfs_pins3),
1030 GROUP("audio_uart1_basic_grp", audio_uart1_basic_pins),
1031 GROUP("audio_uart1_urfs_grp0", audio_uart1_urfs_pins0),
1032 GROUP("audio_uart1_urfs_grp1", audio_uart1_urfs_pins1),
1033 GROUP("audio_uart1_urfs_grp2", audio_uart1_urfs_pins2),
1034 GROUP("audio_uart2_urfs_grp0", audio_uart2_urfs_pins0),
1035 GROUP("audio_uart2_urfs_grp1", audio_uart2_urfs_pins1),
1036 GROUP("audio_uart2_urfs_grp2", audio_uart2_urfs_pins2),
1037 GROUP("audio_uart2_urxd_grp0", audio_uart2_urxd_pins0),
1038 GROUP("audio_uart2_urxd_grp1", audio_uart2_urxd_pins1),
1039 GROUP("audio_uart2_urxd_grp2", audio_uart2_urxd_pins2),
1040 GROUP("audio_uart2_usclk_grp0", audio_uart2_usclk_pins0),
1041 GROUP("audio_uart2_usclk_grp1", audio_uart2_usclk_pins1),
1042 GROUP("audio_uart2_usclk_grp2", audio_uart2_usclk_pins2),
1043 GROUP("audio_uart2_utfs_grp0", audio_uart2_utfs_pins0),
1044 GROUP("audio_uart2_utfs_grp1", audio_uart2_utfs_pins1),
1045 GROUP("audio_uart2_utfs_grp2", audio_uart2_utfs_pins2),
1046 GROUP("audio_uart2_utxd_grp0", audio_uart2_utxd_pins0),
1047 GROUP("audio_uart2_utxd_grp1", audio_uart2_utxd_pins1),
1048 GROUP("audio_uart2_utxd_grp2", audio_uart2_utxd_pins2),
1049 GROUP("c_can_trnsvr_en_grp0", c_can_trnsvr_en_pins0),
1050 GROUP("c_can_trnsvr_en_grp1", c_can_trnsvr_en_pins1),
1051 GROUP("c_can_trnsvr_intr_grp", c_can_trnsvr_intr_pins),
1052 GROUP("c_can_trnsvr_stb_n_grp", c_can_trnsvr_stb_n_pins),
1053 GROUP("c0_can_rxd_trnsv0_grp", c0_can_rxd_trnsv0_pins),
1054 GROUP("c0_can_rxd_trnsv1_grp", c0_can_rxd_trnsv1_pins),
1055 GROUP("c0_can_txd_trnsv0_grp", c0_can_txd_trnsv0_pins),
1056 GROUP("c0_can_txd_trnsv1_grp", c0_can_txd_trnsv1_pins),
1057 GROUP("c1_can_rxd_grp0", c1_can_rxd_pins0),
1058 GROUP("c1_can_rxd_grp1", c1_can_rxd_pins1),
1059 GROUP("c1_can_rxd_grp2", c1_can_rxd_pins2),
1060 GROUP("c1_can_rxd_grp3", c1_can_rxd_pins3),
1061 GROUP("c1_can_txd_grp0", c1_can_txd_pins0),
1062 GROUP("c1_can_txd_grp1", c1_can_txd_pins1),
1063 GROUP("c1_can_txd_grp2", c1_can_txd_pins2),
1064 GROUP("c1_can_txd_grp3", c1_can_txd_pins3),
930 GROUP("ca_audio_lpc_grp", ca_audio_lpc_pins), 1065 GROUP("ca_audio_lpc_grp", ca_audio_lpc_pins),
931 GROUP("ca_bt_lpc_grp", ca_bt_lpc_pins), 1066 GROUP("ca_bt_lpc_grp", ca_bt_lpc_pins),
932 GROUP("ca_coex_grp", ca_coex_pins), 1067 GROUP("ca_coex_grp", ca_coex_pins),
@@ -977,7 +1112,29 @@ struct atlas7_pin_group altas7_pin_groups[] = {
977 GROUP("gn_trg_shutdown_grp3", gn_trg_shutdown_pins3), 1112 GROUP("gn_trg_shutdown_grp3", gn_trg_shutdown_pins3),
978 GROUP("i2c0_grp", i2c0_pins), 1113 GROUP("i2c0_grp", i2c0_pins),
979 GROUP("i2c1_grp", i2c1_pins), 1114 GROUP("i2c1_grp", i2c1_pins),
980 GROUP("jtag_grp0", jtag_pins0),
1115 GROUP("i2s0_grp", i2s0_pins),
1116 GROUP("i2s1_basic_grp", i2s1_basic_pins),
1117 GROUP("i2s1_rxd0_grp0", i2s1_rxd0_pins0),
1118 GROUP("i2s1_rxd0_grp1", i2s1_rxd0_pins1),
1119 GROUP("i2s1_rxd0_grp2", i2s1_rxd0_pins2),
1120 GROUP("i2s1_rxd0_grp3", i2s1_rxd0_pins3),
1121 GROUP("i2s1_rxd0_grp4", i2s1_rxd0_pins4),
1122 GROUP("i2s1_rxd1_grp0", i2s1_rxd1_pins0),
1123 GROUP("i2s1_rxd1_grp1", i2s1_rxd1_pins1),
1124 GROUP("i2s1_rxd1_grp2", i2s1_rxd1_pins2),
1125 GROUP("i2s1_rxd1_grp3", i2s1_rxd1_pins3),
1126 GROUP("i2s1_rxd1_grp4", i2s1_rxd1_pins4),
1127 GROUP("jtag_jt_dbg_nsrst_grp", jtag_jt_dbg_nsrst_pins),
1128 GROUP("jtag_ntrst_grp0", jtag_ntrst_pins0),
1129 GROUP("jtag_ntrst_grp1", jtag_ntrst_pins1),
1130 GROUP("jtag_swdiotms_grp0", jtag_swdiotms_pins0),
1131 GROUP("jtag_swdiotms_grp1", jtag_swdiotms_pins1),
1132 GROUP("jtag_tck_grp0", jtag_tck_pins0),
1133 GROUP("jtag_tck_grp1", jtag_tck_pins1),
1134 GROUP("jtag_tdi_grp0", jtag_tdi_pins0),
1135 GROUP("jtag_tdi_grp1", jtag_tdi_pins1),
1136 GROUP("jtag_tdo_grp0", jtag_tdo_pins0),
1137 GROUP("jtag_tdo_grp1", jtag_tdo_pins1),
981 GROUP("ks_kas_spi_grp0", ks_kas_spi_pins0), 1138 GROUP("ks_kas_spi_grp0", ks_kas_spi_pins0),
982 GROUP("ld_ldd_grp", ld_ldd_pins), 1139 GROUP("ld_ldd_grp", ld_ldd_pins),
983 GROUP("ld_ldd_16bit_grp", ld_ldd_16bit_pins), 1140 GROUP("ld_ldd_16bit_grp", ld_ldd_16bit_pins),
@@ -1002,18 +1159,26 @@ struct atlas7_pin_group altas7_pin_groups[] = {
1002 GROUP("pw_cko0_grp0", pw_cko0_pins0), 1159 GROUP("pw_cko0_grp0", pw_cko0_pins0),
1003 GROUP("pw_cko0_grp1", pw_cko0_pins1), 1160 GROUP("pw_cko0_grp1", pw_cko0_pins1),
1004 GROUP("pw_cko0_grp2", pw_cko0_pins2), 1161 GROUP("pw_cko0_grp2", pw_cko0_pins2),
1162 GROUP("pw_cko0_grp3", pw_cko0_pins3),
1005 GROUP("pw_cko1_grp0", pw_cko1_pins0), 1163 GROUP("pw_cko1_grp0", pw_cko1_pins0),
1006 GROUP("pw_cko1_grp1", pw_cko1_pins1), 1164 GROUP("pw_cko1_grp1", pw_cko1_pins1),
1165 GROUP("pw_cko1_grp2", pw_cko1_pins2),
1007 GROUP("pw_i2s01_clk_grp0", pw_i2s01_clk_pins0), 1166 GROUP("pw_i2s01_clk_grp0", pw_i2s01_clk_pins0),
1008 GROUP("pw_i2s01_clk_grp1", pw_i2s01_clk_pins1), 1167 GROUP("pw_i2s01_clk_grp1", pw_i2s01_clk_pins1),
1009 GROUP("pw_pwm0_grp", pw_pwm0_pins),
1010 GROUP("pw_pwm1_grp", pw_pwm1_pins),
1168 GROUP("pw_i2s01_clk_grp2", pw_i2s01_clk_pins2),
1169 GROUP("pw_pwm0_grp0", pw_pwm0_pins0),
1170 GROUP("pw_pwm0_grp1", pw_pwm0_pins1),
1171 GROUP("pw_pwm1_grp0", pw_pwm1_pins0),
1172 GROUP("pw_pwm1_grp1", pw_pwm1_pins1),
1173 GROUP("pw_pwm1_grp2", pw_pwm1_pins2),
1011 GROUP("pw_pwm2_grp0", pw_pwm2_pins0), 1174 GROUP("pw_pwm2_grp0", pw_pwm2_pins0),
1012 GROUP("pw_pwm2_grp1", pw_pwm2_pins1), 1175 GROUP("pw_pwm2_grp1", pw_pwm2_pins1),
1176 GROUP("pw_pwm2_grp2", pw_pwm2_pins2),
1013 GROUP("pw_pwm3_grp0", pw_pwm3_pins0), 1177 GROUP("pw_pwm3_grp0", pw_pwm3_pins0),
1014 GROUP("pw_pwm3_grp1", pw_pwm3_pins1), 1178 GROUP("pw_pwm3_grp1", pw_pwm3_pins1),
1015 GROUP("pw_pwm_cpu_vol_grp0", pw_pwm_cpu_vol_pins0), 1179 GROUP("pw_pwm_cpu_vol_grp0", pw_pwm_cpu_vol_pins0),
1016 GROUP("pw_pwm_cpu_vol_grp1", pw_pwm_cpu_vol_pins1), 1180 GROUP("pw_pwm_cpu_vol_grp1", pw_pwm_cpu_vol_pins1),
1181 GROUP("pw_pwm_cpu_vol_grp2", pw_pwm_cpu_vol_pins2),
1017 GROUP("pw_backlight_grp0", pw_backlight_pins0), 1182 GROUP("pw_backlight_grp0", pw_backlight_pins0),
1018 GROUP("pw_backlight_grp1", pw_backlight_pins1), 1183 GROUP("pw_backlight_grp1", pw_backlight_pins1),
1019 GROUP("rg_eth_mac_grp", rg_eth_mac_pins), 1184 GROUP("rg_eth_mac_grp", rg_eth_mac_pins),
@@ -1026,8 +1191,11 @@ struct atlas7_pin_group altas7_pin_groups[] = {
1026 GROUP("sd1_grp", sd1_pins), 1191 GROUP("sd1_grp", sd1_pins),
1027 GROUP("sd1_4bit_grp0", sd1_4bit_pins0), 1192 GROUP("sd1_4bit_grp0", sd1_4bit_pins0),
1028 GROUP("sd1_4bit_grp1", sd1_4bit_pins1), 1193 GROUP("sd1_4bit_grp1", sd1_4bit_pins1),
1029 GROUP("sd2_grp0", sd2_pins0),
1030 GROUP("sd2_no_cdb_grp0", sd2_no_cdb_pins0),
1194 GROUP("sd2_basic_grp", sd2_basic_pins),
1195 GROUP("sd2_cdb_grp0", sd2_cdb_pins0),
1196 GROUP("sd2_cdb_grp1", sd2_cdb_pins1),
1197 GROUP("sd2_wpb_grp0", sd2_wpb_pins0),
1198 GROUP("sd2_wpb_grp1", sd2_wpb_pins1),
1031 GROUP("sd3_grp", sd3_pins), 1199 GROUP("sd3_grp", sd3_pins),
1032 GROUP("sd5_grp", sd5_pins), 1200 GROUP("sd5_grp", sd5_pins),
1033 GROUP("sd6_grp0", sd6_pins0), 1201 GROUP("sd6_grp0", sd6_pins0),
@@ -1039,19 +1207,39 @@ struct atlas7_pin_group altas7_pin_groups[] = {
1039 GROUP("uart0_grp", uart0_pins), 1207 GROUP("uart0_grp", uart0_pins),
1040 GROUP("uart0_nopause_grp", uart0_nopause_pins), 1208 GROUP("uart0_nopause_grp", uart0_nopause_pins),
1041 GROUP("uart1_grp", uart1_pins), 1209 GROUP("uart1_grp", uart1_pins),
1042 GROUP("uart2_grp", uart2_pins),
1043 GROUP("uart3_grp0", uart3_pins0),
1044 GROUP("uart3_grp1", uart3_pins1),
1045 GROUP("uart3_grp2", uart3_pins2),
1046 GROUP("uart3_grp3", uart3_pins3),
1047 GROUP("uart3_nopause_grp0", uart3_nopause_pins0),
1048 GROUP("uart3_nopause_grp1", uart3_nopause_pins1),
1049 GROUP("uart4_grp0", uart4_pins0),
1050 GROUP("uart4_grp1", uart4_pins1),
1051 GROUP("uart4_grp2", uart4_pins2),
1052 GROUP("uart4_nopause_grp", uart4_nopause_pins),
1053 GROUP("usb0_drvvbus_grp", usb0_drvvbus_pins),
1054 GROUP("usb1_drvvbus_grp", usb1_drvvbus_pins),
1210 GROUP("uart2_cts_grp0", uart2_cts_pins0),
1211 GROUP("uart2_cts_grp1", uart2_cts_pins1),
1212 GROUP("uart2_rts_grp0", uart2_rts_pins0),
1213 GROUP("uart2_rts_grp1", uart2_rts_pins1),
1214 GROUP("uart2_rxd_grp0", uart2_rxd_pins0),
1215 GROUP("uart2_rxd_grp1", uart2_rxd_pins1),
1216 GROUP("uart2_rxd_grp2", uart2_rxd_pins2),
1217 GROUP("uart2_txd_grp0", uart2_txd_pins0),
1218 GROUP("uart2_txd_grp1", uart2_txd_pins1),
1219 GROUP("uart2_txd_grp2", uart2_txd_pins2),
1220 GROUP("uart3_cts_grp0", uart3_cts_pins0),
1221 GROUP("uart3_cts_grp1", uart3_cts_pins1),
1222 GROUP("uart3_cts_grp2", uart3_cts_pins2),
1223 GROUP("uart3_rts_grp0", uart3_rts_pins0),
1224 GROUP("uart3_rts_grp1", uart3_rts_pins1),
1225 GROUP("uart3_rts_grp2", uart3_rts_pins2),
1226 GROUP("uart3_rxd_grp0", uart3_rxd_pins0),
1227 GROUP("uart3_rxd_grp1", uart3_rxd_pins1),
1228 GROUP("uart3_rxd_grp2", uart3_rxd_pins2),
1229 GROUP("uart3_txd_grp0", uart3_txd_pins0),
1230 GROUP("uart3_txd_grp1", uart3_txd_pins1),
1231 GROUP("uart3_txd_grp2", uart3_txd_pins2),
1232 GROUP("uart4_basic_grp", uart4_basic_pins),
1233 GROUP("uart4_cts_grp0", uart4_cts_pins0),
1234 GROUP("uart4_cts_grp1", uart4_cts_pins1),
1235 GROUP("uart4_cts_grp2", uart4_cts_pins2),
1236 GROUP("uart4_rts_grp0", uart4_rts_pins0),
1237 GROUP("uart4_rts_grp1", uart4_rts_pins1),
1238 GROUP("uart4_rts_grp2", uart4_rts_pins2),
1239 GROUP("usb0_drvvbus_grp0", usb0_drvvbus_pins0),
1240 GROUP("usb0_drvvbus_grp1", usb0_drvvbus_pins1),
1241 GROUP("usb1_drvvbus_grp0", usb1_drvvbus_pins0),
1242 GROUP("usb1_drvvbus_grp1", usb1_drvvbus_pins1),
1055 GROUP("visbus_dout_grp", visbus_dout_pins), 1243 GROUP("visbus_dout_grp", visbus_dout_pins),
1056 GROUP("vi_vip1_grp", vi_vip1_pins), 1244 GROUP("vi_vip1_grp", vi_vip1_pins),
1057 GROUP("vi_vip1_ext_grp", vi_vip1_ext_pins), 1245 GROUP("vi_vip1_ext_grp", vi_vip1_ext_pins),
@@ -1065,23 +1253,90 @@ static const char * const lcd_vip_gpio_grp[] = { "lcd_vip_gpio_grp", };
1065static const char * const sdio_i2s_gpio_grp[] = { "sdio_i2s_gpio_grp", }; 1253static const char * const sdio_i2s_gpio_grp[] = { "sdio_i2s_gpio_grp", };
1066static const char * const sp_rgmii_gpio_grp[] = { "sp_rgmii_gpio_grp", }; 1254static const char * const sp_rgmii_gpio_grp[] = { "sp_rgmii_gpio_grp", };
1067static const char * const lvds_gpio_grp[] = { "lvds_gpio_grp", }; 1255static const char * const lvds_gpio_grp[] = { "lvds_gpio_grp", };
1068static const char * const uart_nand_gpio_grp[] = { "uart_nand_gpio_grp", };
1256static const char * const jtag_uart_nand_gpio_grp[] = {
1257 "jtag_uart_nand_gpio_grp", };
1069static const char * const rtc_gpio_grp[] = { "rtc_gpio_grp", }; 1258static const char * const rtc_gpio_grp[] = { "rtc_gpio_grp", };
1070static const char * const audio_ac97_grp[] = { "audio_ac97_grp", }; 1259static const char * const audio_ac97_grp[] = { "audio_ac97_grp", };
1260static const char * const audio_digmic_grp0[] = { "audio_digmic_grp0", };
1261static const char * const audio_digmic_grp1[] = { "audio_digmic_grp1", };
1262static const char * const audio_digmic_grp2[] = { "audio_digmic_grp2", };
1071static const char * const audio_func_dbg_grp[] = { "audio_func_dbg_grp", }; 1263static const char * const audio_func_dbg_grp[] = { "audio_func_dbg_grp", };
1072static const char * const audio_i2s_grp[] = { "audio_i2s_grp", }; 1264static const char * const audio_i2s_grp[] = { "audio_i2s_grp", };
1073static const char * const audio_i2s_2ch_grp[] = { "audio_i2s_2ch_grp", }; 1265static const char * const audio_i2s_2ch_grp[] = { "audio_i2s_2ch_grp", };
1074static const char * const audio_i2s_extclk_grp[] = { "audio_i2s_extclk_grp", }; 1266static const char * const audio_i2s_extclk_grp[] = { "audio_i2s_extclk_grp", };
1075static const char * const audio_uart0_grp[] = { "audio_uart0_grp", };
1076static const char * const audio_uart1_grp[] = { "audio_uart1_grp", };
1077static const char * const audio_uart2_grp0[] = { "audio_uart2_grp0", };
1078static const char * const audio_uart2_grp1[] = { "audio_uart2_grp1", };
1079static const char * const c_can_trnsvr_grp[] = { "c_can_trnsvr_grp", };
1080static const char * const c0_can_grp0[] = { "c0_can_grp0", };
1081static const char * const c0_can_grp1[] = { "c0_can_grp1", };
1082static const char * const c1_can_grp0[] = { "c1_can_grp0", };
1083static const char * const c1_can_grp1[] = { "c1_can_grp1", };
1084static const char * const c1_can_grp2[] = { "c1_can_grp2", };
1267static const char * const audio_spdif_out_grp0[] = { "audio_spdif_out_grp0", };
1268static const char * const audio_spdif_out_grp1[] = { "audio_spdif_out_grp1", };
1269static const char * const audio_spdif_out_grp2[] = { "audio_spdif_out_grp2", };
1270static const char * const audio_uart0_basic_grp[] = {
1271 "audio_uart0_basic_grp", };
1272static const char * const audio_uart0_urfs_grp0[] = {
1273 "audio_uart0_urfs_grp0", };
1274static const char * const audio_uart0_urfs_grp1[] = {
1275 "audio_uart0_urfs_grp1", };
1276static const char * const audio_uart0_urfs_grp2[] = {
1277 "audio_uart0_urfs_grp2", };
1278static const char * const audio_uart0_urfs_grp3[] = {
1279 "audio_uart0_urfs_grp3", };
1280static const char * const audio_uart1_basic_grp[] = {
1281 "audio_uart1_basic_grp", };
1282static const char * const audio_uart1_urfs_grp0[] = {
1283 "audio_uart1_urfs_grp0", };
1284static const char * const audio_uart1_urfs_grp1[] = {
1285 "audio_uart1_urfs_grp1", };
1286static const char * const audio_uart1_urfs_grp2[] = {
1287 "audio_uart1_urfs_grp2", };
1288static const char * const audio_uart2_urfs_grp0[] = {
1289 "audio_uart2_urfs_grp0", };
1290static const char * const audio_uart2_urfs_grp1[] = {
1291 "audio_uart2_urfs_grp1", };
1292static const char * const audio_uart2_urfs_grp2[] = {
1293 "audio_uart2_urfs_grp2", };
1294static const char * const audio_uart2_urxd_grp0[] = {
1295 "audio_uart2_urxd_grp0", };
1296static const char * const audio_uart2_urxd_grp1[] = {
1297 "audio_uart2_urxd_grp1", };
1298static const char * const audio_uart2_urxd_grp2[] = {
1299 "audio_uart2_urxd_grp2", };
1300static const char * const audio_uart2_usclk_grp0[] = {
1301 "audio_uart2_usclk_grp0", };
1302static const char * const audio_uart2_usclk_grp1[] = {
1303 "audio_uart2_usclk_grp1", };
1304static const char * const audio_uart2_usclk_grp2[] = {
1305 "audio_uart2_usclk_grp2", };
1306static const char * const audio_uart2_utfs_grp0[] = {
1307 "audio_uart2_utfs_grp0", };
1308static const char * const audio_uart2_utfs_grp1[] = {
1309 "audio_uart2_utfs_grp1", };
1310static const char * const audio_uart2_utfs_grp2[] = {
1311 "audio_uart2_utfs_grp2", };
1312static const char * const audio_uart2_utxd_grp0[] = {
1313 "audio_uart2_utxd_grp0", };
1314static const char * const audio_uart2_utxd_grp1[] = {
1315 "audio_uart2_utxd_grp1", };
1316static const char * const audio_uart2_utxd_grp2[] = {
1317 "audio_uart2_utxd_grp2", };
1318static const char * const c_can_trnsvr_en_grp0[] = { "c_can_trnsvr_en_grp0", };
1319static const char * const c_can_trnsvr_en_grp1[] = { "c_can_trnsvr_en_grp1", };
1320static const char * const c_can_trnsvr_intr_grp[] = {
1321 "c_can_trnsvr_intr_grp", };
1322static const char * const c_can_trnsvr_stb_n_grp[] = {
1323 "c_can_trnsvr_stb_n_grp", };
1324static const char * const c0_can_rxd_trnsv0_grp[] = {
1325 "c0_can_rxd_trnsv0_grp", };
1326static const char * const c0_can_rxd_trnsv1_grp[] = {
1327 "c0_can_rxd_trnsv1_grp", };
1328static const char * const c0_can_txd_trnsv0_grp[] = {
1329 "c0_can_txd_trnsv0_grp", };
1330static const char * const c0_can_txd_trnsv1_grp[] = {
1331 "c0_can_txd_trnsv1_grp", };
1332static const char * const c1_can_rxd_grp0[] = { "c1_can_rxd_grp0", };
1333static const char * const c1_can_rxd_grp1[] = { "c1_can_rxd_grp1", };
1334static const char * const c1_can_rxd_grp2[] = { "c1_can_rxd_grp2", };
1335static const char * const c1_can_rxd_grp3[] = { "c1_can_rxd_grp3", };
1336static const char * const c1_can_txd_grp0[] = { "c1_can_txd_grp0", };
1337static const char * const c1_can_txd_grp1[] = { "c1_can_txd_grp1", };
1338static const char * const c1_can_txd_grp2[] = { "c1_can_txd_grp2", };
1339static const char * const c1_can_txd_grp3[] = { "c1_can_txd_grp3", };
1085static const char * const ca_audio_lpc_grp[] = { "ca_audio_lpc_grp", }; 1340static const char * const ca_audio_lpc_grp[] = { "ca_audio_lpc_grp", };
1086static const char * const ca_bt_lpc_grp[] = { "ca_bt_lpc_grp", }; 1341static const char * const ca_bt_lpc_grp[] = { "ca_bt_lpc_grp", };
1087static const char * const ca_coex_grp[] = { "ca_coex_grp", }; 1342static const char * const ca_coex_grp[] = { "ca_coex_grp", };
@@ -1135,7 +1390,30 @@ static const char * const gn_trg_shutdown_grp2[] = { "gn_trg_shutdown_grp2", };
1135static const char * const gn_trg_shutdown_grp3[] = { "gn_trg_shutdown_grp3", }; 1390static const char * const gn_trg_shutdown_grp3[] = { "gn_trg_shutdown_grp3", };
1136static const char * const i2c0_grp[] = { "i2c0_grp", }; 1391static const char * const i2c0_grp[] = { "i2c0_grp", };
1137static const char * const i2c1_grp[] = { "i2c1_grp", }; 1392static const char * const i2c1_grp[] = { "i2c1_grp", };
1138static const char * const jtag_grp0[] = { "jtag_grp0", };
1393static const char * const i2s0_grp[] = { "i2s0_grp", };
1394static const char * const i2s1_basic_grp[] = { "i2s1_basic_grp", };
1395static const char * const i2s1_rxd0_grp0[] = { "i2s1_rxd0_grp0", };
1396static const char * const i2s1_rxd0_grp1[] = { "i2s1_rxd0_grp1", };
1397static const char * const i2s1_rxd0_grp2[] = { "i2s1_rxd0_grp2", };
1398static const char * const i2s1_rxd0_grp3[] = { "i2s1_rxd0_grp3", };
1399static const char * const i2s1_rxd0_grp4[] = { "i2s1_rxd0_grp4", };
1400static const char * const i2s1_rxd1_grp0[] = { "i2s1_rxd1_grp0", };
1401static const char * const i2s1_rxd1_grp1[] = { "i2s1_rxd1_grp1", };
1402static const char * const i2s1_rxd1_grp2[] = { "i2s1_rxd1_grp2", };
1403static const char * const i2s1_rxd1_grp3[] = { "i2s1_rxd1_grp3", };
1404static const char * const i2s1_rxd1_grp4[] = { "i2s1_rxd1_grp4", };
1405static const char * const jtag_jt_dbg_nsrst_grp[] = {
1406 "jtag_jt_dbg_nsrst_grp", };
1407static const char * const jtag_ntrst_grp0[] = { "jtag_ntrst_grp0", };
1408static const char * const jtag_ntrst_grp1[] = { "jtag_ntrst_grp1", };
1409static const char * const jtag_swdiotms_grp0[] = { "jtag_swdiotms_grp0", };
1410static const char * const jtag_swdiotms_grp1[] = { "jtag_swdiotms_grp1", };
1411static const char * const jtag_tck_grp0[] = { "jtag_tck_grp0", };
1412static const char * const jtag_tck_grp1[] = { "jtag_tck_grp1", };
1413static const char * const jtag_tdi_grp0[] = { "jtag_tdi_grp0", };
1414static const char * const jtag_tdi_grp1[] = { "jtag_tdi_grp1", };
1415static const char * const jtag_tdo_grp0[] = { "jtag_tdo_grp0", };
1416static const char * const jtag_tdo_grp1[] = { "jtag_tdo_grp1", };
1139static const char * const ks_kas_spi_grp0[] = { "ks_kas_spi_grp0", }; 1417static const char * const ks_kas_spi_grp0[] = { "ks_kas_spi_grp0", };
1140static const char * const ld_ldd_grp[] = { "ld_ldd_grp", }; 1418static const char * const ld_ldd_grp[] = { "ld_ldd_grp", };
1141static const char * const ld_ldd_16bit_grp[] = { "ld_ldd_16bit_grp", }; 1419static const char * const ld_ldd_16bit_grp[] = { "ld_ldd_16bit_grp", };
@@ -1160,18 +1438,26 @@ static const char * const pwc_wakeup_src3_grp[] = { "pwc_wakeup_src3_grp", };
1160static const char * const pw_cko0_grp0[] = { "pw_cko0_grp0", }; 1438static const char * const pw_cko0_grp0[] = { "pw_cko0_grp0", };
1161static const char * const pw_cko0_grp1[] = { "pw_cko0_grp1", }; 1439static const char * const pw_cko0_grp1[] = { "pw_cko0_grp1", };
1162static const char * const pw_cko0_grp2[] = { "pw_cko0_grp2", }; 1440static const char * const pw_cko0_grp2[] = { "pw_cko0_grp2", };
1441static const char * const pw_cko0_grp3[] = { "pw_cko0_grp3", };
1163static const char * const pw_cko1_grp0[] = { "pw_cko1_grp0", }; 1442static const char * const pw_cko1_grp0[] = { "pw_cko1_grp0", };
1164static const char * const pw_cko1_grp1[] = { "pw_cko1_grp1", }; 1443static const char * const pw_cko1_grp1[] = { "pw_cko1_grp1", };
1444static const char * const pw_cko1_grp2[] = { "pw_cko1_grp2", };
1165static const char * const pw_i2s01_clk_grp0[] = { "pw_i2s01_clk_grp0", }; 1445static const char * const pw_i2s01_clk_grp0[] = { "pw_i2s01_clk_grp0", };
1166static const char * const pw_i2s01_clk_grp1[] = { "pw_i2s01_clk_grp1", }; 1446static const char * const pw_i2s01_clk_grp1[] = { "pw_i2s01_clk_grp1", };
1167static const char * const pw_pwm0_grp[] = { "pw_pwm0_grp", };
1168static const char * const pw_pwm1_grp[] = { "pw_pwm1_grp", };
1447static const char * const pw_i2s01_clk_grp2[] = { "pw_i2s01_clk_grp2", };
1448static const char * const pw_pwm0_grp0[] = { "pw_pwm0_grp0", };
1449static const char * const pw_pwm0_grp1[] = { "pw_pwm0_grp1", };
1450static const char * const pw_pwm1_grp0[] = { "pw_pwm1_grp0", };
1451static const char * const pw_pwm1_grp1[] = { "pw_pwm1_grp1", };
1452static const char * const pw_pwm1_grp2[] = { "pw_pwm1_grp2", };
1169static const char * const pw_pwm2_grp0[] = { "pw_pwm2_grp0", }; 1453static const char * const pw_pwm2_grp0[] = { "pw_pwm2_grp0", };
1170static const char * const pw_pwm2_grp1[] = { "pw_pwm2_grp1", }; 1454static const char * const pw_pwm2_grp1[] = { "pw_pwm2_grp1", };
1455static const char * const pw_pwm2_grp2[] = { "pw_pwm2_grp2", };
1171static const char * const pw_pwm3_grp0[] = { "pw_pwm3_grp0", }; 1456static const char * const pw_pwm3_grp0[] = { "pw_pwm3_grp0", };
1172static const char * const pw_pwm3_grp1[] = { "pw_pwm3_grp1", }; 1457static const char * const pw_pwm3_grp1[] = { "pw_pwm3_grp1", };
1173static const char * const pw_pwm_cpu_vol_grp0[] = { "pw_pwm_cpu_vol_grp0", }; 1458static const char * const pw_pwm_cpu_vol_grp0[] = { "pw_pwm_cpu_vol_grp0", };
1174static const char * const pw_pwm_cpu_vol_grp1[] = { "pw_pwm_cpu_vol_grp1", }; 1459static const char * const pw_pwm_cpu_vol_grp1[] = { "pw_pwm_cpu_vol_grp1", };
1460static const char * const pw_pwm_cpu_vol_grp2[] = { "pw_pwm_cpu_vol_grp2", };
1175static const char * const pw_backlight_grp0[] = { "pw_backlight_grp0", }; 1461static const char * const pw_backlight_grp0[] = { "pw_backlight_grp0", };
1176static const char * const pw_backlight_grp1[] = { "pw_backlight_grp1", }; 1462static const char * const pw_backlight_grp1[] = { "pw_backlight_grp1", };
1177static const char * const rg_eth_mac_grp[] = { "rg_eth_mac_grp", }; 1463static const char * const rg_eth_mac_grp[] = { "rg_eth_mac_grp", };
@@ -1187,8 +1473,11 @@ static const char * const sd0_4bit_grp[] = { "sd0_4bit_grp", };
1187static const char * const sd1_grp[] = { "sd1_grp", }; 1473static const char * const sd1_grp[] = { "sd1_grp", };
1188static const char * const sd1_4bit_grp0[] = { "sd1_4bit_grp0", }; 1474static const char * const sd1_4bit_grp0[] = { "sd1_4bit_grp0", };
1189static const char * const sd1_4bit_grp1[] = { "sd1_4bit_grp1", }; 1475static const char * const sd1_4bit_grp1[] = { "sd1_4bit_grp1", };
1190static const char * const sd2_grp0[] = { "sd2_grp0", };
1191static const char * const sd2_no_cdb_grp0[] = { "sd2_no_cdb_grp0", };
1476static const char * const sd2_basic_grp[] = { "sd2_basic_grp", };
1477static const char * const sd2_cdb_grp0[] = { "sd2_cdb_grp0", };
1478static const char * const sd2_cdb_grp1[] = { "sd2_cdb_grp1", };
1479static const char * const sd2_wpb_grp0[] = { "sd2_wpb_grp0", };
1480static const char * const sd2_wpb_grp1[] = { "sd2_wpb_grp1", };
1192static const char * const sd3_grp[] = { "sd3_grp", }; 1481static const char * const sd3_grp[] = { "sd3_grp", };
1193static const char * const sd5_grp[] = { "sd5_grp", }; 1482static const char * const sd5_grp[] = { "sd5_grp", };
1194static const char * const sd6_grp0[] = { "sd6_grp0", }; 1483static const char * const sd6_grp0[] = { "sd6_grp0", };
@@ -1200,19 +1489,39 @@ static const char * const tpiu_trace_grp[] = { "tpiu_trace_grp", };
1200static const char * const uart0_grp[] = { "uart0_grp", }; 1489static const char * const uart0_grp[] = { "uart0_grp", };
1201static const char * const uart0_nopause_grp[] = { "uart0_nopause_grp", }; 1490static const char * const uart0_nopause_grp[] = { "uart0_nopause_grp", };
1202static const char * const uart1_grp[] = { "uart1_grp", }; 1491static const char * const uart1_grp[] = { "uart1_grp", };
1203static const char * const uart2_grp[] = { "uart2_grp", };
1204static const char * const uart3_grp0[] = { "uart3_grp0", };
1205static const char * const uart3_grp1[] = { "uart3_grp1", };
1206static const char * const uart3_grp2[] = { "uart3_grp2", };
1207static const char * const uart3_grp3[] = { "uart3_grp3", };
1208static const char * const uart3_nopause_grp0[] = { "uart3_nopause_grp0", };
1209static const char * const uart3_nopause_grp1[] = { "uart3_nopause_grp1", };
1210static const char * const uart4_grp0[] = { "uart4_grp0", };
1211static const char * const uart4_grp1[] = { "uart4_grp1", };
1212static const char * const uart4_grp2[] = { "uart4_grp2", };
1213static const char * const uart4_nopause_grp[] = { "uart4_nopause_grp", };
1214static const char * const usb0_drvvbus_grp[] = { "usb0_drvvbus_grp", };
1215static const char * const usb1_drvvbus_grp[] = { "usb1_drvvbus_grp", };
1492static const char * const uart2_cts_grp0[] = { "uart2_cts_grp0", };
1493static const char * const uart2_cts_grp1[] = { "uart2_cts_grp1", };
1494static const char * const uart2_rts_grp0[] = { "uart2_rts_grp0", };
1495static const char * const uart2_rts_grp1[] = { "uart2_rts_grp1", };
1496static const char * const uart2_rxd_grp0[] = { "uart2_rxd_grp0", };
1497static const char * const uart2_rxd_grp1[] = { "uart2_rxd_grp1", };
1498static const char * const uart2_rxd_grp2[] = { "uart2_rxd_grp2", };
1499static const char * const uart2_txd_grp0[] = { "uart2_txd_grp0", };
1500static const char * const uart2_txd_grp1[] = { "uart2_txd_grp1", };
1501static const char * const uart2_txd_grp2[] = { "uart2_txd_grp2", };
1502static const char * const uart3_cts_grp0[] = { "uart3_cts_grp0", };
1503static const char * const uart3_cts_grp1[] = { "uart3_cts_grp1", };
1504static const char * const uart3_cts_grp2[] = { "uart3_cts_grp2", };
1505static const char * const uart3_rts_grp0[] = { "uart3_rts_grp0", };
1506static const char * const uart3_rts_grp1[] = { "uart3_rts_grp1", };
1507static const char * const uart3_rts_grp2[] = { "uart3_rts_grp2", };
1508static const char * const uart3_rxd_grp0[] = { "uart3_rxd_grp0", };
1509static const char * const uart3_rxd_grp1[] = { "uart3_rxd_grp1", };
1510static const char * const uart3_rxd_grp2[] = { "uart3_rxd_grp2", };
1511static const char * const uart3_txd_grp0[] = { "uart3_txd_grp0", };
1512static const char * const uart3_txd_grp1[] = { "uart3_txd_grp1", };
1513static const char * const uart3_txd_grp2[] = { "uart3_txd_grp2", };
1514static const char * const uart4_basic_grp[] = { "uart4_basic_grp", };
1515static const char * const uart4_cts_grp0[] = { "uart4_cts_grp0", };
1516static const char * const uart4_cts_grp1[] = { "uart4_cts_grp1", };
1517static const char * const uart4_cts_grp2[] = { "uart4_cts_grp2", };
1518static const char * const uart4_rts_grp0[] = { "uart4_rts_grp0", };
1519static const char * const uart4_rts_grp1[] = { "uart4_rts_grp1", };
1520static const char * const uart4_rts_grp2[] = { "uart4_rts_grp2", };
1521static const char * const usb0_drvvbus_grp0[] = { "usb0_drvvbus_grp0", };
1522static const char * const usb0_drvvbus_grp1[] = { "usb0_drvvbus_grp1", };
1523static const char * const usb1_drvvbus_grp0[] = { "usb1_drvvbus_grp0", };
1524static const char * const usb1_drvvbus_grp1[] = { "usb1_drvvbus_grp1", };
1216static const char * const visbus_dout_grp[] = { "visbus_dout_grp", }; 1525static const char * const visbus_dout_grp[] = { "visbus_dout_grp", };
1217static const char * const vi_vip1_grp[] = { "vi_vip1_grp", }; 1526static const char * const vi_vip1_grp[] = { "vi_vip1_grp", };
1218static const char * const vi_vip1_ext_grp[] = { "vi_vip1_ext_grp", }; 1527static const char * const vi_vip1_ext_grp[] = { "vi_vip1_ext_grp", };
@@ -1376,7 +1685,7 @@ static struct atlas7_grp_mux lvds_gpio_grp_mux = {
1376 .pad_mux_list = lvds_gpio_grp_pad_mux, 1685 .pad_mux_list = lvds_gpio_grp_pad_mux,
1377}; 1686};
1378 1687
1379static struct atlas7_pad_mux uart_nand_gpio_grp_pad_mux[] = {
1688static struct atlas7_pad_mux jtag_uart_nand_gpio_grp_pad_mux[] = {
1380 MUX(1, 44, 0, N, N, N, N), 1689 MUX(1, 44, 0, N, N, N, N),
1381 MUX(1, 43, 0, N, N, N, N), 1690 MUX(1, 43, 0, N, N, N, N),
1382 MUX(1, 42, 0, N, N, N, N), 1691 MUX(1, 42, 0, N, N, N, N),
@@ -1401,11 +1710,16 @@ static struct atlas7_pad_mux uart_nand_gpio_grp_pad_mux[] = {
1401 MUX(1, 138, 0, N, N, N, N), 1710 MUX(1, 138, 0, N, N, N, N),
1402 MUX(1, 139, 0, N, N, N, N), 1711 MUX(1, 139, 0, N, N, N, N),
1403 MUX(1, 140, 0, N, N, N, N), 1712 MUX(1, 140, 0, N, N, N, N),
1713 MUX(1, 159, 0, N, N, N, N),
1714 MUX(1, 160, 0, N, N, N, N),
1715 MUX(1, 161, 0, N, N, N, N),
1716 MUX(1, 162, 0, N, N, N, N),
1717 MUX(1, 163, 0, N, N, N, N),
1404}; 1718};
1405 1719
1406static struct atlas7_grp_mux uart_nand_gpio_grp_mux = {
1407 .pad_mux_count = ARRAY_SIZE(uart_nand_gpio_grp_pad_mux),
1408 .pad_mux_list = uart_nand_gpio_grp_pad_mux,
1720static struct atlas7_grp_mux jtag_uart_nand_gpio_grp_mux = {
1721 .pad_mux_count = ARRAY_SIZE(jtag_uart_nand_gpio_grp_pad_mux),
1722 .pad_mux_list = jtag_uart_nand_gpio_grp_pad_mux,
1409}; 1723};
1410 1724
1411static struct atlas7_pad_mux rtc_gpio_grp_pad_mux[] = { 1725static struct atlas7_pad_mux rtc_gpio_grp_pad_mux[] = {
@@ -1422,6 +1736,7 @@ static struct atlas7_pad_mux rtc_gpio_grp_pad_mux[] = {
1422 MUX(0, 15, 0, N, N, N, N), 1736 MUX(0, 15, 0, N, N, N, N),
1423 MUX(0, 16, 0, N, N, N, N), 1737 MUX(0, 16, 0, N, N, N, N),
1424 MUX(0, 17, 0, N, N, N, N), 1738 MUX(0, 17, 0, N, N, N, N),
1739 MUX(0, 9, 0, N, N, N, N),
1425}; 1740};
1426 1741
1427static struct atlas7_grp_mux rtc_gpio_grp_mux = { 1742static struct atlas7_grp_mux rtc_gpio_grp_mux = {
@@ -1441,6 +1756,33 @@ static struct atlas7_grp_mux audio_ac97_grp_mux = {
1441 .pad_mux_list = audio_ac97_grp_pad_mux, 1756 .pad_mux_list = audio_ac97_grp_pad_mux,
1442}; 1757};
1443 1758
1759static struct atlas7_pad_mux audio_digmic_grp0_pad_mux[] = {
1760 MUX(1, 51, 3, 0xa10, 20, 0xa90, 20),
1761};
1762
1763static struct atlas7_grp_mux audio_digmic_grp0_mux = {
1764 .pad_mux_count = ARRAY_SIZE(audio_digmic_grp0_pad_mux),
1765 .pad_mux_list = audio_digmic_grp0_pad_mux,
1766};
1767
1768static struct atlas7_pad_mux audio_digmic_grp1_pad_mux[] = {
1769 MUX(1, 122, 5, 0xa10, 20, 0xa90, 20),
1770};
1771
1772static struct atlas7_grp_mux audio_digmic_grp1_mux = {
1773 .pad_mux_count = ARRAY_SIZE(audio_digmic_grp1_pad_mux),
1774 .pad_mux_list = audio_digmic_grp1_pad_mux,
1775};
1776
1777static struct atlas7_pad_mux audio_digmic_grp2_pad_mux[] = {
1778 MUX(1, 161, 7, 0xa10, 20, 0xa90, 20),
1779};
1780
1781static struct atlas7_grp_mux audio_digmic_grp2_mux = {
1782 .pad_mux_count = ARRAY_SIZE(audio_digmic_grp2_pad_mux),
1783 .pad_mux_list = audio_digmic_grp2_pad_mux,
1784};
1785
1444static struct atlas7_pad_mux audio_func_dbg_grp_pad_mux[] = { 1786static struct atlas7_pad_mux audio_func_dbg_grp_pad_mux[] = {
1445 MUX(1, 141, 4, N, N, N, N), 1787 MUX(1, 141, 4, N, N, N, N),
1446 MUX(1, 144, 4, N, N, N, N), 1788 MUX(1, 144, 4, N, N, N, N),
@@ -1512,111 +1854,397 @@ static struct atlas7_grp_mux audio_i2s_extclk_grp_mux = {
1512 .pad_mux_list = audio_i2s_extclk_grp_pad_mux, 1854 .pad_mux_list = audio_i2s_extclk_grp_pad_mux,
1513}; 1855};
1514 1856
1515static struct atlas7_pad_mux audio_uart0_grp_pad_mux[] = {
1857static struct atlas7_pad_mux audio_spdif_out_grp0_pad_mux[] = {
1858 MUX(1, 112, 3, N, N, N, N),
1859};
1860
1861static struct atlas7_grp_mux audio_spdif_out_grp0_mux = {
1862 .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp0_pad_mux),
1863 .pad_mux_list = audio_spdif_out_grp0_pad_mux,
1864};
1865
1866static struct atlas7_pad_mux audio_spdif_out_grp1_pad_mux[] = {
1867 MUX(1, 116, 3, N, N, N, N),
1868};
1869
1870static struct atlas7_grp_mux audio_spdif_out_grp1_mux = {
1871 .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp1_pad_mux),
1872 .pad_mux_list = audio_spdif_out_grp1_pad_mux,
1873};
1874
1875static struct atlas7_pad_mux audio_spdif_out_grp2_pad_mux[] = {
1876 MUX(1, 142, 3, N, N, N, N),
1877};
1878
1879static struct atlas7_grp_mux audio_spdif_out_grp2_mux = {
1880 .pad_mux_count = ARRAY_SIZE(audio_spdif_out_grp2_pad_mux),
1881 .pad_mux_list = audio_spdif_out_grp2_pad_mux,
1882};
1883
1884static struct atlas7_pad_mux audio_uart0_basic_grp_pad_mux[] = {
1516 MUX(1, 143, 1, N, N, N, N), 1885 MUX(1, 143, 1, N, N, N, N),
1517 MUX(1, 142, 1, N, N, N, N), 1886 MUX(1, 142, 1, N, N, N, N),
1518 MUX(1, 141, 1, N, N, N, N), 1887 MUX(1, 141, 1, N, N, N, N),
1519 MUX(1, 144, 1, N, N, N, N), 1888 MUX(1, 144, 1, N, N, N, N),
1520}; 1889};
1521 1890
1522static struct atlas7_grp_mux audio_uart0_grp_mux = {
1523 .pad_mux_count = ARRAY_SIZE(audio_uart0_grp_pad_mux),
1524 .pad_mux_list = audio_uart0_grp_pad_mux,
1891static struct atlas7_grp_mux audio_uart0_basic_grp_mux = {
1892 .pad_mux_count = ARRAY_SIZE(audio_uart0_basic_grp_pad_mux),
1893 .pad_mux_list = audio_uart0_basic_grp_pad_mux,
1894};
1895
1896static struct atlas7_pad_mux audio_uart0_urfs_grp0_pad_mux[] = {
1897 MUX(1, 117, 5, 0xa10, 28, 0xa90, 28),
1525}; 1898};
1526 1899
1527static struct atlas7_pad_mux audio_uart1_grp_pad_mux[] = {
1528 MUX(1, 147, 1, N, N, N, N),
1529 MUX(1, 146, 1, N, N, N, N),
1900static struct atlas7_grp_mux audio_uart0_urfs_grp0_mux = {
1901 .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp0_pad_mux),
1902 .pad_mux_list = audio_uart0_urfs_grp0_pad_mux,
1530 MUX(1, 145, 1, N, N, N, N),
1531 MUX(1, 148, 1, N, N, N, N),
1532}; 1903};
1533 1904
1534static struct atlas7_grp_mux audio_uart1_grp_mux = {
1535 .pad_mux_count = ARRAY_SIZE(audio_uart1_grp_pad_mux),
1905static struct atlas7_pad_mux audio_uart0_urfs_grp1_pad_mux[] = {
1906 MUX(1, 139, 3, 0xa10, 28, 0xa90, 28),
1536 .pad_mux_list = audio_uart1_grp_pad_mux,
1537}; 1907};
1538 1908
1539static struct atlas7_pad_mux audio_uart2_grp0_pad_mux[] = {
1909static struct atlas7_grp_mux audio_uart0_urfs_grp1_mux = {
1910 .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp1_pad_mux),
1911 .pad_mux_list = audio_uart0_urfs_grp1_pad_mux,
1912};
1913
1914static struct atlas7_pad_mux audio_uart0_urfs_grp2_pad_mux[] = {
1915 MUX(1, 163, 3, 0xa10, 28, 0xa90, 28),
1916};
1917
1918static struct atlas7_grp_mux audio_uart0_urfs_grp2_mux = {
1919 .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp2_pad_mux),
1920 .pad_mux_list = audio_uart0_urfs_grp2_pad_mux,
1921};
1922
1923static struct atlas7_pad_mux audio_uart0_urfs_grp3_pad_mux[] = {
1924 MUX(1, 162, 6, 0xa10, 28, 0xa90, 28),
1925};
1926
1927static struct atlas7_grp_mux audio_uart0_urfs_grp3_mux = {
1928 .pad_mux_count = ARRAY_SIZE(audio_uart0_urfs_grp3_pad_mux),
1929 .pad_mux_list = audio_uart0_urfs_grp3_pad_mux,
1930};
1931
1932static struct atlas7_pad_mux audio_uart1_basic_grp_pad_mux[] = {
1933 MUX(1, 147, 1, 0xa10, 24, 0xa90, 24),
1934 MUX(1, 146, 1, 0xa10, 25, 0xa90, 25),
1935 MUX(1, 145, 1, 0xa10, 23, 0xa90, 23),
1936 MUX(1, 148, 1, 0xa10, 22, 0xa90, 22),
1937};
1938
1939static struct atlas7_grp_mux audio_uart1_basic_grp_mux = {
1940 .pad_mux_count = ARRAY_SIZE(audio_uart1_basic_grp_pad_mux),
1941 .pad_mux_list = audio_uart1_basic_grp_pad_mux,
1942};
1943
1944static struct atlas7_pad_mux audio_uart1_urfs_grp0_pad_mux[] = {
1945 MUX(1, 117, 6, 0xa10, 29, 0xa90, 29),
1946};
1947
1948static struct atlas7_grp_mux audio_uart1_urfs_grp0_mux = {
1949 .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp0_pad_mux),
1950 .pad_mux_list = audio_uart1_urfs_grp0_pad_mux,
1951};
1952
1953static struct atlas7_pad_mux audio_uart1_urfs_grp1_pad_mux[] = {
1954 MUX(1, 140, 3, 0xa10, 29, 0xa90, 29),
1955};
1956
1957static struct atlas7_grp_mux audio_uart1_urfs_grp1_mux = {
1958 .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp1_pad_mux),
1959 .pad_mux_list = audio_uart1_urfs_grp1_pad_mux,
1960};
1961
1962static struct atlas7_pad_mux audio_uart1_urfs_grp2_pad_mux[] = {
1963 MUX(1, 163, 4, 0xa10, 29, 0xa90, 29),
1964};
1965
1966static struct atlas7_grp_mux audio_uart1_urfs_grp2_mux = {
1967 .pad_mux_count = ARRAY_SIZE(audio_uart1_urfs_grp2_pad_mux),
1968 .pad_mux_list = audio_uart1_urfs_grp2_pad_mux,
1969};
1970
1971static struct atlas7_pad_mux audio_uart2_urfs_grp0_pad_mux[] = {
1972 MUX(1, 139, 4, 0xa10, 30, 0xa90, 30),
1973};
1974
1975static struct atlas7_grp_mux audio_uart2_urfs_grp0_mux = {
1976 .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp0_pad_mux),
1977 .pad_mux_list = audio_uart2_urfs_grp0_pad_mux,
1978};
1979
1980static struct atlas7_pad_mux audio_uart2_urfs_grp1_pad_mux[] = {
1981 MUX(1, 163, 6, 0xa10, 30, 0xa90, 30),
1982};
1983
1984static struct atlas7_grp_mux audio_uart2_urfs_grp1_mux = {
1985 .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp1_pad_mux),
1986 .pad_mux_list = audio_uart2_urfs_grp1_pad_mux,
1987};
1988
1989static struct atlas7_pad_mux audio_uart2_urfs_grp2_pad_mux[] = {
1990 MUX(1, 96, 3, 0xa10, 30, 0xa90, 30),
1991};
1992
1993static struct atlas7_grp_mux audio_uart2_urfs_grp2_mux = {
1994 .pad_mux_count = ARRAY_SIZE(audio_uart2_urfs_grp2_pad_mux),
1995 .pad_mux_list = audio_uart2_urfs_grp2_pad_mux,
1996};
1997
1998static struct atlas7_pad_mux audio_uart2_urxd_grp0_pad_mux[] = {
1540 MUX(1, 20, 2, 0xa00, 24, 0xa80, 24), 1999 MUX(1, 20, 2, 0xa00, 24, 0xa80, 24),
1541 MUX(1, 21, 2, 0xa00, 25, 0xa80, 25),
1542 MUX(1, 19, 2, 0xa00, 23, 0xa80, 23),
1543 MUX(1, 18, 2, 0xa00, 22, 0xa80, 22),
1544}; 2000};
1545 2001
1546static struct atlas7_grp_mux audio_uart2_grp0_mux = {
1547 .pad_mux_count = ARRAY_SIZE(audio_uart2_grp0_pad_mux),
1548 .pad_mux_list = audio_uart2_grp0_pad_mux,
2002static struct atlas7_grp_mux audio_uart2_urxd_grp0_mux = {
2003 .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp0_pad_mux),
2004 .pad_mux_list = audio_uart2_urxd_grp0_pad_mux,
1549}; 2005};
1550 2006
1551static struct atlas7_pad_mux audio_uart2_grp1_pad_mux[] = {
2007static struct atlas7_pad_mux audio_uart2_urxd_grp1_pad_mux[] = {
1552 MUX(1, 109, 2, 0xa00, 24, 0xa80, 24), 2008 MUX(1, 109, 2, 0xa00, 24, 0xa80, 24),
1553 MUX(1, 110, 2, 0xa00, 25, 0xa80, 25),
2009};
2010
2011static struct atlas7_grp_mux audio_uart2_urxd_grp1_mux = {
2012 .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp1_pad_mux),
2013 .pad_mux_list = audio_uart2_urxd_grp1_pad_mux,
2014};
2015
2016static struct atlas7_pad_mux audio_uart2_urxd_grp2_pad_mux[] = {
2017 MUX(1, 93, 3, 0xa00, 24, 0xa80, 24),
2018};
2019
2020static struct atlas7_grp_mux audio_uart2_urxd_grp2_mux = {
2021 .pad_mux_count = ARRAY_SIZE(audio_uart2_urxd_grp2_pad_mux),
2022 .pad_mux_list = audio_uart2_urxd_grp2_pad_mux,
2023};
2024
2025static struct atlas7_pad_mux audio_uart2_usclk_grp0_pad_mux[] = {
2026 MUX(1, 19, 2, 0xa00, 23, 0xa80, 23),
2027};
2028
2029static struct atlas7_grp_mux audio_uart2_usclk_grp0_mux = {
2030 .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp0_pad_mux),
2031 .pad_mux_list = audio_uart2_usclk_grp0_pad_mux,
2032};
2033
2034static struct atlas7_pad_mux audio_uart2_usclk_grp1_pad_mux[] = {
1554 MUX(1, 101, 2, 0xa00, 23, 0xa80, 23), 2035 MUX(1, 101, 2, 0xa00, 23, 0xa80, 23),
2036};
2037
2038static struct atlas7_grp_mux audio_uart2_usclk_grp1_mux = {
2039 .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp1_pad_mux),
2040 .pad_mux_list = audio_uart2_usclk_grp1_pad_mux,
2041};
2042
2043static struct atlas7_pad_mux audio_uart2_usclk_grp2_pad_mux[] = {
2044 MUX(1, 91, 3, 0xa00, 23, 0xa80, 23),
2045};
2046
2047static struct atlas7_grp_mux audio_uart2_usclk_grp2_mux = {
2048 .pad_mux_count = ARRAY_SIZE(audio_uart2_usclk_grp2_pad_mux),
2049 .pad_mux_list = audio_uart2_usclk_grp2_pad_mux,
2050};
2051
2052static struct atlas7_pad_mux audio_uart2_utfs_grp0_pad_mux[] = {
2053 MUX(1, 18, 2, 0xa00, 22, 0xa80, 22),
2054};
2055
2056static struct atlas7_grp_mux audio_uart2_utfs_grp0_mux = {
2057 .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp0_pad_mux),
2058 .pad_mux_list = audio_uart2_utfs_grp0_pad_mux,
2059};
2060
2061static struct atlas7_pad_mux audio_uart2_utfs_grp1_pad_mux[] = {
1555 MUX(1, 111, 2, 0xa00, 22, 0xa80, 22), 2062 MUX(1, 111, 2, 0xa00, 22, 0xa80, 22),
1556}; 2063};
1557 2064
1558static struct atlas7_grp_mux audio_uart2_grp1_mux = {
1559 .pad_mux_count = ARRAY_SIZE(audio_uart2_grp1_pad_mux),
1560 .pad_mux_list = audio_uart2_grp1_pad_mux,
2065static struct atlas7_grp_mux audio_uart2_utfs_grp1_mux = {
2066 .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp1_pad_mux),
2067 .pad_mux_list = audio_uart2_utfs_grp1_pad_mux,
2068};
2069
2070static struct atlas7_pad_mux audio_uart2_utfs_grp2_pad_mux[] = {
2071 MUX(1, 94, 3, 0xa00, 22, 0xa80, 22),
2072};
2073
2074static struct atlas7_grp_mux audio_uart2_utfs_grp2_mux = {
2075 .pad_mux_count = ARRAY_SIZE(audio_uart2_utfs_grp2_pad_mux),
2076 .pad_mux_list = audio_uart2_utfs_grp2_pad_mux,
2077};
2078
2079static struct atlas7_pad_mux audio_uart2_utxd_grp0_pad_mux[] = {
2080 MUX(1, 21, 2, 0xa00, 25, 0xa80, 25),
2081};
2082
2083static struct atlas7_grp_mux audio_uart2_utxd_grp0_mux = {
2084 .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp0_pad_mux),
2085 .pad_mux_list = audio_uart2_utxd_grp0_pad_mux,
2086};
2087
2088static struct atlas7_pad_mux audio_uart2_utxd_grp1_pad_mux[] = {
2089 MUX(1, 110, 2, 0xa00, 25, 0xa80, 25),
2090};
2091
2092static struct atlas7_grp_mux audio_uart2_utxd_grp1_mux = {
2093 .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp1_pad_mux),
2094 .pad_mux_list = audio_uart2_utxd_grp1_pad_mux,
2095};
2096
2097static struct atlas7_pad_mux audio_uart2_utxd_grp2_pad_mux[] = {
2098 MUX(1, 92, 3, 0xa00, 25, 0xa80, 25),
2099};
2100
2101static struct atlas7_grp_mux audio_uart2_utxd_grp2_mux = {
2102 .pad_mux_count = ARRAY_SIZE(audio_uart2_utxd_grp2_pad_mux),
2103 .pad_mux_list = audio_uart2_utxd_grp2_pad_mux,
2104};
2105
2106static struct atlas7_pad_mux c_can_trnsvr_en_grp0_pad_mux[] = {
2107 MUX(0, 2, 6, N, N, N, N),
1561}; 2108};
1562 2109
1563static struct atlas7_pad_mux c_can_trnsvr_grp_pad_mux[] = {
2110static struct atlas7_grp_mux c_can_trnsvr_en_grp0_mux = {
2111 .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_en_grp0_pad_mux),
2112 .pad_mux_list = c_can_trnsvr_en_grp0_pad_mux,
2113};
2114
2115static struct atlas7_pad_mux c_can_trnsvr_en_grp1_pad_mux[] = {
2116 MUX(0, 0, 2, N, N, N, N),
2117};
2118
2119static struct atlas7_grp_mux c_can_trnsvr_en_grp1_mux = {
2120 .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_en_grp1_pad_mux),
2121 .pad_mux_list = c_can_trnsvr_en_grp1_pad_mux,
2122};
2123
2124static struct atlas7_pad_mux c_can_trnsvr_intr_grp_pad_mux[] = {
1564 MUX(0, 1, 2, N, N, N, N), 2125 MUX(0, 1, 2, N, N, N, N),
1565}; 2126};
1566 2127
1567static struct atlas7_grp_mux c_can_trnsvr_grp_mux = {
1568 .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_grp_pad_mux),
1569 .pad_mux_list = c_can_trnsvr_grp_pad_mux,
2128static struct atlas7_grp_mux c_can_trnsvr_intr_grp_mux = {
2129 .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_intr_grp_pad_mux),
2130 .pad_mux_list = c_can_trnsvr_intr_grp_pad_mux,
1570}; 2131};
1571 2132
1572static struct atlas7_pad_mux c0_can_grp0_pad_mux[] = {
2133static struct atlas7_pad_mux c_can_trnsvr_stb_n_grp_pad_mux[] = {
2134 MUX(0, 3, 6, N, N, N, N),
2135};
2136
2137static struct atlas7_grp_mux c_can_trnsvr_stb_n_grp_mux = {
2138 .pad_mux_count = ARRAY_SIZE(c_can_trnsvr_stb_n_grp_pad_mux),
2139 .pad_mux_list = c_can_trnsvr_stb_n_grp_pad_mux,
2140};
2141
2142static struct atlas7_pad_mux c0_can_rxd_trnsv0_grp_pad_mux[] = {
1573 MUX(0, 11, 1, 0xa08, 9, 0xa88, 9), 2143 MUX(0, 11, 1, 0xa08, 9, 0xa88, 9),
2144};
2145
2146static struct atlas7_grp_mux c0_can_rxd_trnsv0_grp_mux = {
2147 .pad_mux_count = ARRAY_SIZE(c0_can_rxd_trnsv0_grp_pad_mux),
2148 .pad_mux_list = c0_can_rxd_trnsv0_grp_pad_mux,
2149};
2150
2151static struct atlas7_pad_mux c0_can_rxd_trnsv1_grp_pad_mux[] = {
2152 MUX(0, 2, 5, 0xa10, 9, 0xa90, 9),
2153};
2154
2155static struct atlas7_grp_mux c0_can_rxd_trnsv1_grp_mux = {
2156 .pad_mux_count = ARRAY_SIZE(c0_can_rxd_trnsv1_grp_pad_mux),
2157 .pad_mux_list = c0_can_rxd_trnsv1_grp_pad_mux,
2158};
2159
2160static struct atlas7_pad_mux c0_can_txd_trnsv0_grp_pad_mux[] = {
1574 MUX(0, 10, 1, N, N, N, N), 2161 MUX(0, 10, 1, N, N, N, N),
1575}; 2162};
1576 2163
1577static struct atlas7_grp_mux c0_can_grp0_mux = {
1578 .pad_mux_count = ARRAY_SIZE(c0_can_grp0_pad_mux),
1579 .pad_mux_list = c0_can_grp0_pad_mux,
2164static struct atlas7_grp_mux c0_can_txd_trnsv0_grp_mux = {
2165 .pad_mux_count = ARRAY_SIZE(c0_can_txd_trnsv0_grp_pad_mux),
2166 .pad_mux_list = c0_can_txd_trnsv0_grp_pad_mux,
1580}; 2167};
1581 2168
1582static struct atlas7_pad_mux c0_can_grp1_pad_mux[] = {
2169static struct atlas7_pad_mux c0_can_txd_trnsv1_grp_pad_mux[] = {
1583 MUX(0, 2, 5, 0xa08, 9, 0xa88, 9),
1584 MUX(0, 3, 5, N, N, N, N), 2170 MUX(0, 3, 5, N, N, N, N),
1585}; 2171};
1586 2172
1587static struct atlas7_grp_mux c0_can_grp1_mux = {
1588 .pad_mux_count = ARRAY_SIZE(c0_can_grp1_pad_mux),
1589 .pad_mux_list = c0_can_grp1_pad_mux,
2173static struct atlas7_grp_mux c0_can_txd_trnsv1_grp_mux = {
2174 .pad_mux_count = ARRAY_SIZE(c0_can_txd_trnsv1_grp_pad_mux),
2175 .pad_mux_list = c0_can_txd_trnsv1_grp_pad_mux,
1590}; 2176};
1591 2177
1592static struct atlas7_pad_mux c1_can_grp0_pad_mux[] = {
2178static struct atlas7_pad_mux c1_can_rxd_grp0_pad_mux[] = {
1593 MUX(1, 138, 2, 0xa00, 4, 0xa80, 4), 2179 MUX(1, 138, 2, 0xa00, 4, 0xa80, 4),
1594 MUX(1, 137, 2, N, N, N, N),
1595}; 2180};
1596 2181
1597static struct atlas7_grp_mux c1_can_grp0_mux = {
1598 .pad_mux_count = ARRAY_SIZE(c1_can_grp0_pad_mux),
1599 .pad_mux_list = c1_can_grp0_pad_mux,
2182static struct atlas7_grp_mux c1_can_rxd_grp0_mux = {
2183 .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp0_pad_mux),
2184 .pad_mux_list = c1_can_rxd_grp0_pad_mux,
1600}; 2185};
1601 2186
1602static struct atlas7_pad_mux c1_can_grp1_pad_mux[] = {
2187static struct atlas7_pad_mux c1_can_rxd_grp1_pad_mux[] = {
1603 MUX(1, 147, 2, 0xa00, 4, 0xa80, 4), 2188 MUX(1, 147, 2, 0xa00, 4, 0xa80, 4),
1604 MUX(1, 146, 2, N, N, N, N),
1605}; 2189};
1606 2190
1607static struct atlas7_grp_mux c1_can_grp1_mux = {
1608 .pad_mux_count = ARRAY_SIZE(c1_can_grp1_pad_mux),
1609 .pad_mux_list = c1_can_grp1_pad_mux,
2191static struct atlas7_grp_mux c1_can_rxd_grp1_mux = {
2192 .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp1_pad_mux),
2193 .pad_mux_list = c1_can_rxd_grp1_pad_mux,
1610}; 2194};
1611 2195
1612static struct atlas7_pad_mux c1_can_grp2_pad_mux[] = {
2196static struct atlas7_pad_mux c1_can_rxd_grp2_pad_mux[] = {
1613 MUX(0, 2, 2, 0xa00, 4, 0xa80, 4), 2197 MUX(0, 2, 2, 0xa00, 4, 0xa80, 4),
2198};
2199
2200static struct atlas7_grp_mux c1_can_rxd_grp2_mux = {
2201 .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp2_pad_mux),
2202 .pad_mux_list = c1_can_rxd_grp2_pad_mux,
2203};
2204
2205static struct atlas7_pad_mux c1_can_rxd_grp3_pad_mux[] = {
2206 MUX(1, 162, 4, 0xa00, 4, 0xa80, 4),
2207};
2208
2209static struct atlas7_grp_mux c1_can_rxd_grp3_mux = {
2210 .pad_mux_count = ARRAY_SIZE(c1_can_rxd_grp3_pad_mux),
2211 .pad_mux_list = c1_can_rxd_grp3_pad_mux,
2212};
2213
2214static struct atlas7_pad_mux c1_can_txd_grp0_pad_mux[] = {
2215 MUX(1, 137, 2, N, N, N, N),
2216};
2217
2218static struct atlas7_grp_mux c1_can_txd_grp0_mux = {
2219 .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp0_pad_mux),
2220 .pad_mux_list = c1_can_txd_grp0_pad_mux,
2221};
2222
2223static struct atlas7_pad_mux c1_can_txd_grp1_pad_mux[] = {
2224 MUX(1, 146, 2, N, N, N, N),
2225};
2226
2227static struct atlas7_grp_mux c1_can_txd_grp1_mux = {
2228 .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp1_pad_mux),
2229 .pad_mux_list = c1_can_txd_grp1_pad_mux,
2230};
2231
2232static struct atlas7_pad_mux c1_can_txd_grp2_pad_mux[] = {
1614 MUX(0, 3, 2, N, N, N, N), 2233 MUX(0, 3, 2, N, N, N, N),
1615}; 2234};
1616 2235
1617static struct atlas7_grp_mux c1_can_grp2_mux = {
1618 .pad_mux_count = ARRAY_SIZE(c1_can_grp2_pad_mux),
1619 .pad_mux_list = c1_can_grp2_pad_mux,
2236static struct atlas7_grp_mux c1_can_txd_grp2_mux = {
2237 .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp2_pad_mux),
2238 .pad_mux_list = c1_can_txd_grp2_pad_mux,
2239};
2240
2241static struct atlas7_pad_mux c1_can_txd_grp3_pad_mux[] = {
2242 MUX(1, 161, 4, N, N, N, N),
2243};
2244
2245static struct atlas7_grp_mux c1_can_txd_grp3_mux = {
2246 .pad_mux_count = ARRAY_SIZE(c1_can_txd_grp3_pad_mux),
2247 .pad_mux_list = c1_can_txd_grp3_pad_mux,
1620}; 2248};
1621 2249
1622static struct atlas7_pad_mux ca_audio_lpc_grp_pad_mux[] = { 2250static struct atlas7_pad_mux ca_audio_lpc_grp_pad_mux[] = {
@@ -2198,18 +2826,215 @@ static struct atlas7_grp_mux i2c1_grp_mux = {
2198 .pad_mux_list = i2c1_grp_pad_mux, 2826 .pad_mux_list = i2c1_grp_pad_mux,
2199}; 2827};
2200 2828
2201static struct atlas7_pad_mux jtag_grp0_pad_mux[] = {
2829static struct atlas7_pad_mux i2s0_grp_pad_mux[] = {
2830 MUX(1, 91, 2, 0xa10, 12, 0xa90, 12),
2831 MUX(1, 93, 2, 0xa10, 13, 0xa90, 13),
2832 MUX(1, 94, 2, 0xa10, 14, 0xa90, 14),
2833 MUX(1, 92, 2, 0xa10, 15, 0xa90, 15),
2834};
2835
2836static struct atlas7_grp_mux i2s0_grp_mux = {
2837 .pad_mux_count = ARRAY_SIZE(i2s0_grp_pad_mux),
2838 .pad_mux_list = i2s0_grp_pad_mux,
2839};
2840
2841static struct atlas7_pad_mux i2s1_basic_grp_pad_mux[] = {
2842 MUX(1, 95, 2, 0xa10, 16, 0xa90, 16),
2843 MUX(1, 96, 2, 0xa10, 19, 0xa90, 19),
2844};
2845
2846static struct atlas7_grp_mux i2s1_basic_grp_mux = {
2847 .pad_mux_count = ARRAY_SIZE(i2s1_basic_grp_pad_mux),
2848 .pad_mux_list = i2s1_basic_grp_pad_mux,
2849};
2850
2851static struct atlas7_pad_mux i2s1_rxd0_grp0_pad_mux[] = {
2852 MUX(1, 61, 4, 0xa10, 17, 0xa90, 17),
2853};
2854
2855static struct atlas7_grp_mux i2s1_rxd0_grp0_mux = {
2856 .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp0_pad_mux),
2857 .pad_mux_list = i2s1_rxd0_grp0_pad_mux,
2858};
2859
2860static struct atlas7_pad_mux i2s1_rxd0_grp1_pad_mux[] = {
2861 MUX(1, 131, 4, 0xa10, 17, 0xa90, 17),
2862};
2863
2864static struct atlas7_grp_mux i2s1_rxd0_grp1_mux = {
2865 .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp1_pad_mux),
2866 .pad_mux_list = i2s1_rxd0_grp1_pad_mux,
2867};
2868
2869static struct atlas7_pad_mux i2s1_rxd0_grp2_pad_mux[] = {
2870 MUX(1, 129, 2, 0xa10, 17, 0xa90, 17),
2871};
2872
2873static struct atlas7_grp_mux i2s1_rxd0_grp2_mux = {
2874 .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp2_pad_mux),
2875 .pad_mux_list = i2s1_rxd0_grp2_pad_mux,
2876};
2877
2878static struct atlas7_pad_mux i2s1_rxd0_grp3_pad_mux[] = {
2879 MUX(1, 117, 7, 0xa10, 17, 0xa90, 17),
2880};
2881
2882static struct atlas7_grp_mux i2s1_rxd0_grp3_mux = {
2883 .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp3_pad_mux),
2884 .pad_mux_list = i2s1_rxd0_grp3_pad_mux,
2885};
2886
2887static struct atlas7_pad_mux i2s1_rxd0_grp4_pad_mux[] = {
2888 MUX(1, 83, 4, 0xa10, 17, 0xa90, 17),
2889};
2890
2891static struct atlas7_grp_mux i2s1_rxd0_grp4_mux = {
2892 .pad_mux_count = ARRAY_SIZE(i2s1_rxd0_grp4_pad_mux),
2893 .pad_mux_list = i2s1_rxd0_grp4_pad_mux,
2894};
2895
2896static struct atlas7_pad_mux i2s1_rxd1_grp0_pad_mux[] = {
2897 MUX(1, 72, 4, 0xa10, 18, 0xa90, 18),
2898};
2899
2900static struct atlas7_grp_mux i2s1_rxd1_grp0_mux = {
2901 .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp0_pad_mux),
2902 .pad_mux_list = i2s1_rxd1_grp0_pad_mux,
2903};
2904
2905static struct atlas7_pad_mux i2s1_rxd1_grp1_pad_mux[] = {
2906 MUX(1, 132, 4, 0xa10, 18, 0xa90, 18),
2907};
2908
2909static struct atlas7_grp_mux i2s1_rxd1_grp1_mux = {
2910 .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp1_pad_mux),
2911 .pad_mux_list = i2s1_rxd1_grp1_pad_mux,
2912};
2913
2914static struct atlas7_pad_mux i2s1_rxd1_grp2_pad_mux[] = {
2915 MUX(1, 130, 2, 0xa10, 18, 0xa90, 18),
2916};
2917
2918static struct atlas7_grp_mux i2s1_rxd1_grp2_mux = {
2919 .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp2_pad_mux),
2920 .pad_mux_list = i2s1_rxd1_grp2_pad_mux,
2921};
2922
2923static struct atlas7_pad_mux i2s1_rxd1_grp3_pad_mux[] = {
2924 MUX(1, 118, 7, 0xa10, 18, 0xa90, 18),
2925};
2926
2927static struct atlas7_grp_mux i2s1_rxd1_grp3_mux = {
2928 .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp3_pad_mux),
2929 .pad_mux_list = i2s1_rxd1_grp3_pad_mux,
2930};
2931
2932static struct atlas7_pad_mux i2s1_rxd1_grp4_pad_mux[] = {
2933 MUX(1, 84, 4, 0xa10, 18, 0xa90, 18),
2934};
2935
2936static struct atlas7_grp_mux i2s1_rxd1_grp4_mux = {
2937 .pad_mux_count = ARRAY_SIZE(i2s1_rxd1_grp4_pad_mux),
2938 .pad_mux_list = i2s1_rxd1_grp4_pad_mux,
2939};
2940
2941static struct atlas7_pad_mux jtag_jt_dbg_nsrst_grp_pad_mux[] = {
2202 MUX(1, 125, 5, 0xa08, 2, 0xa88, 2), 2942 MUX(1, 125, 5, 0xa08, 2, 0xa88, 2),
2943};
2944
2945static struct atlas7_grp_mux jtag_jt_dbg_nsrst_grp_mux = {
2946 .pad_mux_count = ARRAY_SIZE(jtag_jt_dbg_nsrst_grp_pad_mux),
2947 .pad_mux_list = jtag_jt_dbg_nsrst_grp_pad_mux,
2948};
2949
2950static struct atlas7_pad_mux jtag_ntrst_grp0_pad_mux[] = {
2203 MUX(0, 4, 3, 0xa08, 3, 0xa88, 3), 2951 MUX(0, 4, 3, 0xa08, 3, 0xa88, 3),
2204 MUX(0, 2, 3, N, N, N, N),
2205 MUX(0, 0, 3, N, N, N, N),
2206 MUX(0, 1, 3, N, N, N, N),
 2952};
 2953
 2954static struct atlas7_grp_mux jtag_ntrst_grp0_mux = {
2955 .pad_mux_count = ARRAY_SIZE(jtag_ntrst_grp0_pad_mux),
2956 .pad_mux_list = jtag_ntrst_grp0_pad_mux,
2957};
2958
2959static struct atlas7_pad_mux jtag_ntrst_grp1_pad_mux[] = {
2960 MUX(1, 163, 1, 0xa08, 3, 0xa88, 3),
2961};
2962
2963static struct atlas7_grp_mux jtag_ntrst_grp1_mux = {
2964 .pad_mux_count = ARRAY_SIZE(jtag_ntrst_grp1_pad_mux),
2965 .pad_mux_list = jtag_ntrst_grp1_pad_mux,
2966};
2967
2968static struct atlas7_pad_mux jtag_swdiotms_grp0_pad_mux[] = {
2969 MUX(0, 2, 3, 0xa10, 10, 0xa90, 10),
2970};
2971
2972static struct atlas7_grp_mux jtag_swdiotms_grp0_mux = {
2973 .pad_mux_count = ARRAY_SIZE(jtag_swdiotms_grp0_pad_mux),
2974 .pad_mux_list = jtag_swdiotms_grp0_pad_mux,
2975};
2976
2977static struct atlas7_pad_mux jtag_swdiotms_grp1_pad_mux[] = {
2978 MUX(1, 160, 1, 0xa10, 10, 0xa90, 10),
2979};
2980
2981static struct atlas7_grp_mux jtag_swdiotms_grp1_mux = {
2982 .pad_mux_count = ARRAY_SIZE(jtag_swdiotms_grp1_pad_mux),
2983 .pad_mux_list = jtag_swdiotms_grp1_pad_mux,
2984};
2985
2986static struct atlas7_pad_mux jtag_tck_grp0_pad_mux[] = {
2987 MUX(0, 0, 3, 0xa10, 11, 0xa90, 11),
2988};
2989
2990static struct atlas7_grp_mux jtag_tck_grp0_mux = {
2991 .pad_mux_count = ARRAY_SIZE(jtag_tck_grp0_pad_mux),
2992 .pad_mux_list = jtag_tck_grp0_pad_mux,
2993};
2994
2995static struct atlas7_pad_mux jtag_tck_grp1_pad_mux[] = {
2996 MUX(1, 161, 1, 0xa10, 11, 0xa90, 11),
2997};
2998
2999static struct atlas7_grp_mux jtag_tck_grp1_mux = {
3000 .pad_mux_count = ARRAY_SIZE(jtag_tck_grp1_pad_mux),
3001 .pad_mux_list = jtag_tck_grp1_pad_mux,
3002};
3003
3004static struct atlas7_pad_mux jtag_tdi_grp0_pad_mux[] = {
3005 MUX(0, 1, 3, 0xa10, 31, 0xa90, 31),
3006};
3007
3008static struct atlas7_grp_mux jtag_tdi_grp0_mux = {
3009 .pad_mux_count = ARRAY_SIZE(jtag_tdi_grp0_pad_mux),
3010 .pad_mux_list = jtag_tdi_grp0_pad_mux,
3011};
3012
3013static struct atlas7_pad_mux jtag_tdi_grp1_pad_mux[] = {
3014 MUX(1, 162, 1, 0xa10, 31, 0xa90, 31),
3015};
3016
3017static struct atlas7_grp_mux jtag_tdi_grp1_mux = {
3018 .pad_mux_count = ARRAY_SIZE(jtag_tdi_grp1_pad_mux),
3019 .pad_mux_list = jtag_tdi_grp1_pad_mux,
3020};
3021
3022static struct atlas7_pad_mux jtag_tdo_grp0_pad_mux[] = {
2207 MUX(0, 3, 3, N, N, N, N), 3023 MUX(0, 3, 3, N, N, N, N),
2208}; 3024};
2209 3025
2210static struct atlas7_grp_mux jtag_grp0_mux = {
2211 .pad_mux_count = ARRAY_SIZE(jtag_grp0_pad_mux),
2212 .pad_mux_list = jtag_grp0_pad_mux,
 3026static struct atlas7_grp_mux jtag_tdo_grp0_mux = {
 3027 .pad_mux_count = ARRAY_SIZE(jtag_tdo_grp0_pad_mux),
 3028 .pad_mux_list = jtag_tdo_grp0_pad_mux,
3029};
3030
3031static struct atlas7_pad_mux jtag_tdo_grp1_pad_mux[] = {
3032 MUX(1, 159, 1, N, N, N, N),
3033};
3034
3035static struct atlas7_grp_mux jtag_tdo_grp1_mux = {
3036 .pad_mux_count = ARRAY_SIZE(jtag_tdo_grp1_pad_mux),
3037 .pad_mux_list = jtag_tdo_grp1_pad_mux,
2213}; 3038};
2214 3039
2215static struct atlas7_pad_mux ks_kas_spi_grp0_pad_mux[] = { 3040static struct atlas7_pad_mux ks_kas_spi_grp0_pad_mux[] = {
@@ -2401,6 +3226,7 @@ static struct atlas7_grp_mux nd_df_nowp_grp_mux = {
2401static struct atlas7_pad_mux ps_grp_pad_mux[] = { 3226static struct atlas7_pad_mux ps_grp_pad_mux[] = {
2402 MUX(1, 120, 2, N, N, N, N), 3227 MUX(1, 120, 2, N, N, N, N),
2403 MUX(1, 119, 2, N, N, N, N), 3228 MUX(1, 119, 2, N, N, N, N),
3229 MUX(1, 121, 5, N, N, N, N),
2404}; 3230};
2405 3231
2406static struct atlas7_grp_mux ps_grp_mux = { 3232static struct atlas7_grp_mux ps_grp_mux = {
@@ -2534,6 +3360,15 @@ static struct atlas7_grp_mux pw_cko0_grp2_mux = {
2534 .pad_mux_list = pw_cko0_grp2_pad_mux, 3360 .pad_mux_list = pw_cko0_grp2_pad_mux,
2535}; 3361};
2536 3362
3363static struct atlas7_pad_mux pw_cko0_grp3_pad_mux[] = {
3364 MUX(1, 162, 5, N, N, N, N),
3365};
3366
3367static struct atlas7_grp_mux pw_cko0_grp3_mux = {
3368 .pad_mux_count = ARRAY_SIZE(pw_cko0_grp3_pad_mux),
3369 .pad_mux_list = pw_cko0_grp3_pad_mux,
3370};
3371
2537static struct atlas7_pad_mux pw_cko1_grp0_pad_mux[] = { 3372static struct atlas7_pad_mux pw_cko1_grp0_pad_mux[] = {
2538 MUX(1, 124, 3, N, N, N, N), 3373 MUX(1, 124, 3, N, N, N, N),
2539}; 3374};
@@ -2552,6 +3387,15 @@ static struct atlas7_grp_mux pw_cko1_grp1_mux = {
2552 .pad_mux_list = pw_cko1_grp1_pad_mux, 3387 .pad_mux_list = pw_cko1_grp1_pad_mux,
2553}; 3388};
2554 3389
3390static struct atlas7_pad_mux pw_cko1_grp2_pad_mux[] = {
3391 MUX(1, 163, 5, N, N, N, N),
3392};
3393
3394static struct atlas7_grp_mux pw_cko1_grp2_mux = {
3395 .pad_mux_count = ARRAY_SIZE(pw_cko1_grp2_pad_mux),
3396 .pad_mux_list = pw_cko1_grp2_pad_mux,
3397};
3398
2555static struct atlas7_pad_mux pw_i2s01_clk_grp0_pad_mux[] = { 3399static struct atlas7_pad_mux pw_i2s01_clk_grp0_pad_mux[] = {
2556 MUX(1, 125, 3, N, N, N, N), 3400 MUX(1, 125, 3, N, N, N, N),
2557}; 3401};
@@ -2570,22 +3414,58 @@ static struct atlas7_grp_mux pw_i2s01_clk_grp1_mux = {
2570 .pad_mux_list = pw_i2s01_clk_grp1_pad_mux, 3414 .pad_mux_list = pw_i2s01_clk_grp1_pad_mux,
2571}; 3415};
2572 3416
2573static struct atlas7_pad_mux pw_pwm0_grp_pad_mux[] = {
 3417static struct atlas7_pad_mux pw_i2s01_clk_grp2_pad_mux[] = {
3418 MUX(1, 132, 2, N, N, N, N),
3419};
3420
3421static struct atlas7_grp_mux pw_i2s01_clk_grp2_mux = {
3422 .pad_mux_count = ARRAY_SIZE(pw_i2s01_clk_grp2_pad_mux),
3423 .pad_mux_list = pw_i2s01_clk_grp2_pad_mux,
3424};
3425
3426static struct atlas7_pad_mux pw_pwm0_grp0_pad_mux[] = {
2574 MUX(1, 119, 3, N, N, N, N), 3427 MUX(1, 119, 3, N, N, N, N),
2575}; 3428};
2576 3429
2577static struct atlas7_grp_mux pw_pwm0_grp_mux = {
2578 .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp_pad_mux),
2579 .pad_mux_list = pw_pwm0_grp_pad_mux,
 3430static struct atlas7_grp_mux pw_pwm0_grp0_mux = {
 3431 .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp0_pad_mux),
 3432 .pad_mux_list = pw_pwm0_grp0_pad_mux,
3433};
3434
3435static struct atlas7_pad_mux pw_pwm0_grp1_pad_mux[] = {
3436 MUX(1, 159, 5, N, N, N, N),
3437};
3438
3439static struct atlas7_grp_mux pw_pwm0_grp1_mux = {
3440 .pad_mux_count = ARRAY_SIZE(pw_pwm0_grp1_pad_mux),
3441 .pad_mux_list = pw_pwm0_grp1_pad_mux,
2580}; 3442};
2581 3443
2582static struct atlas7_pad_mux pw_pwm1_grp_pad_mux[] = {
 3444static struct atlas7_pad_mux pw_pwm1_grp0_pad_mux[] = {
2583 MUX(1, 120, 3, N, N, N, N), 3445 MUX(1, 120, 3, N, N, N, N),
2584}; 3446};
2585 3447
2586static struct atlas7_grp_mux pw_pwm1_grp_mux = {
2587 .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp_pad_mux),
2588 .pad_mux_list = pw_pwm1_grp_pad_mux,
 3448static struct atlas7_grp_mux pw_pwm1_grp0_mux = {
 3449 .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp0_pad_mux),
 3450 .pad_mux_list = pw_pwm1_grp0_pad_mux,
3451};
3452
3453static struct atlas7_pad_mux pw_pwm1_grp1_pad_mux[] = {
3454 MUX(1, 160, 5, N, N, N, N),
3455};
3456
3457static struct atlas7_grp_mux pw_pwm1_grp1_mux = {
3458 .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp1_pad_mux),
3459 .pad_mux_list = pw_pwm1_grp1_pad_mux,
3460};
3461
3462static struct atlas7_pad_mux pw_pwm1_grp2_pad_mux[] = {
3463 MUX(1, 131, 2, N, N, N, N),
3464};
3465
3466static struct atlas7_grp_mux pw_pwm1_grp2_mux = {
3467 .pad_mux_count = ARRAY_SIZE(pw_pwm1_grp2_pad_mux),
3468 .pad_mux_list = pw_pwm1_grp2_pad_mux,
2589}; 3469};
2590 3470
2591static struct atlas7_pad_mux pw_pwm2_grp0_pad_mux[] = { 3471static struct atlas7_pad_mux pw_pwm2_grp0_pad_mux[] = {
@@ -2606,6 +3486,15 @@ static struct atlas7_grp_mux pw_pwm2_grp1_mux = {
2606 .pad_mux_list = pw_pwm2_grp1_pad_mux, 3486 .pad_mux_list = pw_pwm2_grp1_pad_mux,
2607}; 3487};
2608 3488
3489static struct atlas7_pad_mux pw_pwm2_grp2_pad_mux[] = {
3490 MUX(1, 161, 5, N, N, N, N),
3491};
3492
3493static struct atlas7_grp_mux pw_pwm2_grp2_mux = {
3494 .pad_mux_count = ARRAY_SIZE(pw_pwm2_grp2_pad_mux),
3495 .pad_mux_list = pw_pwm2_grp2_pad_mux,
3496};
3497
2609static struct atlas7_pad_mux pw_pwm3_grp0_pad_mux[] = { 3498static struct atlas7_pad_mux pw_pwm3_grp0_pad_mux[] = {
2610 MUX(1, 122, 3, N, N, N, N), 3499 MUX(1, 122, 3, N, N, N, N),
2611}; 3500};
@@ -2642,6 +3531,15 @@ static struct atlas7_grp_mux pw_pwm_cpu_vol_grp1_mux = {
2642 .pad_mux_list = pw_pwm_cpu_vol_grp1_pad_mux, 3531 .pad_mux_list = pw_pwm_cpu_vol_grp1_pad_mux,
2643}; 3532};
2644 3533
3534static struct atlas7_pad_mux pw_pwm_cpu_vol_grp2_pad_mux[] = {
3535 MUX(1, 161, 5, N, N, N, N),
3536};
3537
3538static struct atlas7_grp_mux pw_pwm_cpu_vol_grp2_mux = {
3539 .pad_mux_count = ARRAY_SIZE(pw_pwm_cpu_vol_grp2_pad_mux),
3540 .pad_mux_list = pw_pwm_cpu_vol_grp2_pad_mux,
3541};
3542
2645static struct atlas7_pad_mux pw_backlight_grp0_pad_mux[] = { 3543static struct atlas7_pad_mux pw_backlight_grp0_pad_mux[] = {
2646 MUX(1, 122, 3, N, N, N, N), 3544 MUX(1, 122, 3, N, N, N, N),
2647}; 3545};
@@ -2795,35 +3693,54 @@ static struct atlas7_grp_mux sd1_4bit_grp1_mux = {
2795 .pad_mux_list = sd1_4bit_grp1_pad_mux, 3693 .pad_mux_list = sd1_4bit_grp1_pad_mux,
2796}; 3694};
2797 3695
2798static struct atlas7_pad_mux sd2_grp0_pad_mux[] = {
 3696static struct atlas7_pad_mux sd2_basic_grp_pad_mux[] = {
2799 MUX(1, 124, 2, 0xa08, 7, 0xa88, 7),
2800 MUX(1, 31, 1, N, N, N, N), 3697 MUX(1, 31, 1, N, N, N, N),
2801 MUX(1, 32, 1, N, N, N, N), 3698 MUX(1, 32, 1, N, N, N, N),
2802 MUX(1, 33, 1, N, N, N, N), 3699 MUX(1, 33, 1, N, N, N, N),
2803 MUX(1, 34, 1, N, N, N, N), 3700 MUX(1, 34, 1, N, N, N, N),
2804 MUX(1, 35, 1, N, N, N, N), 3701 MUX(1, 35, 1, N, N, N, N),
2805 MUX(1, 36, 1, N, N, N, N), 3702 MUX(1, 36, 1, N, N, N, N),
2806 MUX(1, 123, 2, N, N, N, N),
2807}; 3703};
2808 3704
2809static struct atlas7_grp_mux sd2_grp0_mux = {
2810 .pad_mux_count = ARRAY_SIZE(sd2_grp0_pad_mux),
2811 .pad_mux_list = sd2_grp0_pad_mux,
 3705static struct atlas7_grp_mux sd2_basic_grp_mux = {
 3706 .pad_mux_count = ARRAY_SIZE(sd2_basic_grp_pad_mux),
 3707 .pad_mux_list = sd2_basic_grp_pad_mux,
2812}; 3708};
2813 3709
2814static struct atlas7_pad_mux sd2_no_cdb_grp0_pad_mux[] = {
2815 MUX(1, 31, 1, N, N, N, N),
 3710static struct atlas7_pad_mux sd2_cdb_grp0_pad_mux[] = {
 3711 MUX(1, 124, 2, 0xa08, 7, 0xa88, 7),
2816 MUX(1, 32, 1, N, N, N, N),
2817 MUX(1, 33, 1, N, N, N, N),
2818 MUX(1, 34, 1, N, N, N, N),
2819 MUX(1, 35, 1, N, N, N, N),
2820 MUX(1, 36, 1, N, N, N, N),
2821 MUX(1, 123, 2, N, N, N, N),
2822}; 3712};
2823 3713
2824static struct atlas7_grp_mux sd2_no_cdb_grp0_mux = {
2825 .pad_mux_count = ARRAY_SIZE(sd2_no_cdb_grp0_pad_mux),
2826 .pad_mux_list = sd2_no_cdb_grp0_pad_mux,
 3714static struct atlas7_grp_mux sd2_cdb_grp0_mux = {
 3715 .pad_mux_count = ARRAY_SIZE(sd2_cdb_grp0_pad_mux),
 3716 .pad_mux_list = sd2_cdb_grp0_pad_mux,
3717};
3718
3719static struct atlas7_pad_mux sd2_cdb_grp1_pad_mux[] = {
3720 MUX(1, 161, 6, 0xa08, 7, 0xa88, 7),
3721};
3722
3723static struct atlas7_grp_mux sd2_cdb_grp1_mux = {
3724 .pad_mux_count = ARRAY_SIZE(sd2_cdb_grp1_pad_mux),
3725 .pad_mux_list = sd2_cdb_grp1_pad_mux,
3726};
3727
3728static struct atlas7_pad_mux sd2_wpb_grp0_pad_mux[] = {
3729 MUX(1, 123, 2, 0xa10, 6, 0xa90, 6),
3730};
3731
3732static struct atlas7_grp_mux sd2_wpb_grp0_mux = {
3733 .pad_mux_count = ARRAY_SIZE(sd2_wpb_grp0_pad_mux),
3734 .pad_mux_list = sd2_wpb_grp0_pad_mux,
3735};
3736
3737static struct atlas7_pad_mux sd2_wpb_grp1_pad_mux[] = {
3738 MUX(1, 163, 7, 0xa10, 6, 0xa90, 6),
3739};
3740
3741static struct atlas7_grp_mux sd2_wpb_grp1_mux = {
3742 .pad_mux_count = ARRAY_SIZE(sd2_wpb_grp1_pad_mux),
3743 .pad_mux_list = sd2_wpb_grp1_pad_mux,
2827}; 3744};
2828 3745
2829static struct atlas7_pad_mux sd3_grp_pad_mux[] = { 3746static struct atlas7_pad_mux sd3_grp_pad_mux[] = {
@@ -2975,146 +3892,302 @@ static struct atlas7_grp_mux uart1_grp_mux = {
2975 .pad_mux_list = uart1_grp_pad_mux, 3892 .pad_mux_list = uart1_grp_pad_mux,
2976}; 3893};
2977 3894
2978static struct atlas7_pad_mux uart2_grp_pad_mux[] = {
2979 MUX(0, 11, 2, N, N, N, N),
 3895static struct atlas7_pad_mux uart2_cts_grp0_pad_mux[] = {
 3896 MUX(1, 132, 3, 0xa10, 2, 0xa90, 2),
3897};
3898
3899static struct atlas7_grp_mux uart2_cts_grp0_mux = {
3900 .pad_mux_count = ARRAY_SIZE(uart2_cts_grp0_pad_mux),
3901 .pad_mux_list = uart2_cts_grp0_pad_mux,
3902};
3903
3904static struct atlas7_pad_mux uart2_cts_grp1_pad_mux[] = {
3905 MUX(1, 162, 2, 0xa10, 2, 0xa90, 2),
3906};
3907
3908static struct atlas7_grp_mux uart2_cts_grp1_mux = {
3909 .pad_mux_count = ARRAY_SIZE(uart2_cts_grp1_pad_mux),
3910 .pad_mux_list = uart2_cts_grp1_pad_mux,
3911};
3912
3913static struct atlas7_pad_mux uart2_rts_grp0_pad_mux[] = {
3914 MUX(1, 131, 3, N, N, N, N),
3915};
3916
3917static struct atlas7_grp_mux uart2_rts_grp0_mux = {
3918 .pad_mux_count = ARRAY_SIZE(uart2_rts_grp0_pad_mux),
3919 .pad_mux_list = uart2_rts_grp0_pad_mux,
3920};
3921
3922static struct atlas7_pad_mux uart2_rts_grp1_pad_mux[] = {
3923 MUX(1, 161, 2, N, N, N, N),
3924};
3925
3926static struct atlas7_grp_mux uart2_rts_grp1_mux = {
3927 .pad_mux_count = ARRAY_SIZE(uart2_rts_grp1_pad_mux),
3928 .pad_mux_list = uart2_rts_grp1_pad_mux,
3929};
3930
3931static struct atlas7_pad_mux uart2_rxd_grp0_pad_mux[] = {
3932 MUX(0, 11, 2, 0xa10, 5, 0xa90, 5),
3933};
3934
3935static struct atlas7_grp_mux uart2_rxd_grp0_mux = {
3936 .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp0_pad_mux),
3937 .pad_mux_list = uart2_rxd_grp0_pad_mux,
3938};
3939
3940static struct atlas7_pad_mux uart2_rxd_grp1_pad_mux[] = {
3941 MUX(1, 160, 2, 0xa10, 5, 0xa90, 5),
3942};
3943
3944static struct atlas7_grp_mux uart2_rxd_grp1_mux = {
3945 .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp1_pad_mux),
3946 .pad_mux_list = uart2_rxd_grp1_pad_mux,
3947};
3948
3949static struct atlas7_pad_mux uart2_rxd_grp2_pad_mux[] = {
3950 MUX(1, 130, 3, 0xa10, 5, 0xa90, 5),
3951};
3952
3953static struct atlas7_grp_mux uart2_rxd_grp2_mux = {
3954 .pad_mux_count = ARRAY_SIZE(uart2_rxd_grp2_pad_mux),
3955 .pad_mux_list = uart2_rxd_grp2_pad_mux,
3956};
3957
3958static struct atlas7_pad_mux uart2_txd_grp0_pad_mux[] = {
2980 MUX(0, 10, 2, N, N, N, N), 3959 MUX(0, 10, 2, N, N, N, N),
2981}; 3960};
2982 3961
2983static struct atlas7_grp_mux uart2_grp_mux = {
2984 .pad_mux_count = ARRAY_SIZE(uart2_grp_pad_mux),
2985 .pad_mux_list = uart2_grp_pad_mux,
 3962static struct atlas7_grp_mux uart2_txd_grp0_mux = {
 3963 .pad_mux_count = ARRAY_SIZE(uart2_txd_grp0_pad_mux),
 3964 .pad_mux_list = uart2_txd_grp0_pad_mux,
3965};
3966
3967static struct atlas7_pad_mux uart2_txd_grp1_pad_mux[] = {
3968 MUX(1, 159, 2, N, N, N, N),
2986}; 3969};
2987 3970
2988static struct atlas7_pad_mux uart3_grp0_pad_mux[] = {
 3971static struct atlas7_grp_mux uart2_txd_grp1_mux = {
3972 .pad_mux_count = ARRAY_SIZE(uart2_txd_grp1_pad_mux),
3973 .pad_mux_list = uart2_txd_grp1_pad_mux,
3974};
3975
3976static struct atlas7_pad_mux uart2_txd_grp2_pad_mux[] = {
3977 MUX(1, 129, 3, N, N, N, N),
3978};
3979
3980static struct atlas7_grp_mux uart2_txd_grp2_mux = {
3981 .pad_mux_count = ARRAY_SIZE(uart2_txd_grp2_pad_mux),
3982 .pad_mux_list = uart2_txd_grp2_pad_mux,
3983};
3984
3985static struct atlas7_pad_mux uart3_cts_grp0_pad_mux[] = {
2989 MUX(1, 125, 2, 0xa08, 0, 0xa88, 0), 3986 MUX(1, 125, 2, 0xa08, 0, 0xa88, 0),
2990 MUX(1, 126, 2, N, N, N, N),
2991 MUX(1, 138, 1, 0xa00, 5, 0xa80, 5),
2992 MUX(1, 137, 1, N, N, N, N),
2993}; 3987};
2994 3988
2995static struct atlas7_grp_mux uart3_grp0_mux = {
2996 .pad_mux_count = ARRAY_SIZE(uart3_grp0_pad_mux),
2997 .pad_mux_list = uart3_grp0_pad_mux,
 3989static struct atlas7_grp_mux uart3_cts_grp0_mux = {
 3990 .pad_mux_count = ARRAY_SIZE(uart3_cts_grp0_pad_mux),
 3991 .pad_mux_list = uart3_cts_grp0_pad_mux,
2998}; 3992};
2999 3993
3000static struct atlas7_pad_mux uart3_grp1_pad_mux[] = {
 3994static struct atlas7_pad_mux uart3_cts_grp1_pad_mux[] = {
3001 MUX(1, 111, 4, 0xa08, 0, 0xa88, 0), 3995 MUX(1, 111, 4, 0xa08, 0, 0xa88, 0),
3002 MUX(1, 109, 4, N, N, N, N),
3003 MUX(1, 84, 2, 0xa00, 5, 0xa80, 5),
3004 MUX(1, 83, 2, N, N, N, N),
3005}; 3996};
3006 3997
3007static struct atlas7_grp_mux uart3_grp1_mux = {
3008 .pad_mux_count = ARRAY_SIZE(uart3_grp1_pad_mux),
3009 .pad_mux_list = uart3_grp1_pad_mux,
 3998static struct atlas7_grp_mux uart3_cts_grp1_mux = {
 3999 .pad_mux_count = ARRAY_SIZE(uart3_cts_grp1_pad_mux),
 4000 .pad_mux_list = uart3_cts_grp1_pad_mux,
3010}; 4001};
3011 4002
3012static struct atlas7_pad_mux uart3_grp2_pad_mux[] = {
 4003static struct atlas7_pad_mux uart3_cts_grp2_pad_mux[] = {
3013 MUX(1, 140, 2, 0xa08, 0, 0xa88, 0), 4004 MUX(1, 140, 2, 0xa08, 0, 0xa88, 0),
4005};
4006
4007static struct atlas7_grp_mux uart3_cts_grp2_mux = {
4008 .pad_mux_count = ARRAY_SIZE(uart3_cts_grp2_pad_mux),
4009 .pad_mux_list = uart3_cts_grp2_pad_mux,
4010};
4011
4012static struct atlas7_pad_mux uart3_rts_grp0_pad_mux[] = {
4013 MUX(1, 126, 2, N, N, N, N),
4014};
4015
4016static struct atlas7_grp_mux uart3_rts_grp0_mux = {
4017 .pad_mux_count = ARRAY_SIZE(uart3_rts_grp0_pad_mux),
4018 .pad_mux_list = uart3_rts_grp0_pad_mux,
4019};
4020
4021static struct atlas7_pad_mux uart3_rts_grp1_pad_mux[] = {
4022 MUX(1, 109, 4, N, N, N, N),
4023};
4024
4025static struct atlas7_grp_mux uart3_rts_grp1_mux = {
4026 .pad_mux_count = ARRAY_SIZE(uart3_rts_grp1_pad_mux),
4027 .pad_mux_list = uart3_rts_grp1_pad_mux,
4028};
4029
4030static struct atlas7_pad_mux uart3_rts_grp2_pad_mux[] = {
3014 MUX(1, 139, 2, N, N, N, N),
 4031};
4032};
4033
4034static struct atlas7_grp_mux uart3_rts_grp2_mux = {
4035 .pad_mux_count = ARRAY_SIZE(uart3_rts_grp2_pad_mux),
4036 .pad_mux_list = uart3_rts_grp2_pad_mux,
4037};
4038
4039static struct atlas7_pad_mux uart3_rxd_grp0_pad_mux[] = {
3015 MUX(1, 138, 1, 0xa00, 5, 0xa80, 5), 4040 MUX(1, 138, 1, 0xa00, 5, 0xa80, 5),
3016 MUX(1, 137, 1, N, N, N, N),
3017}; 4041};
3018 4042
3019static struct atlas7_grp_mux uart3_grp2_mux = {
3020 .pad_mux_count = ARRAY_SIZE(uart3_grp2_pad_mux),
3021 .pad_mux_list = uart3_grp2_pad_mux,
 4043static struct atlas7_grp_mux uart3_rxd_grp0_mux = {
 4044 .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp0_pad_mux),
 4045 .pad_mux_list = uart3_rxd_grp0_pad_mux,
3022}; 4046};
3023 4047
3024static struct atlas7_pad_mux uart3_grp3_pad_mux[] = {
 4048static struct atlas7_pad_mux uart3_rxd_grp1_pad_mux[] = {
3025 MUX(1, 139, 2, N, N, N, N),
3026 MUX(1, 140, 2, 0xa08, 0, 0xa88, 0),
3027 MUX(1, 84, 2, 0xa00, 5, 0xa80, 5), 4049 MUX(1, 84, 2, 0xa00, 5, 0xa80, 5),
3028 MUX(1, 83, 2, N, N, N, N),
3029}; 4050};
3030 4051
3031static struct atlas7_grp_mux uart3_grp3_mux = {
3032 .pad_mux_count = ARRAY_SIZE(uart3_grp3_pad_mux),
3033 .pad_mux_list = uart3_grp3_pad_mux,
 4052static struct atlas7_grp_mux uart3_rxd_grp1_mux = {
 4053 .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp1_pad_mux),
 4054 .pad_mux_list = uart3_rxd_grp1_pad_mux,
3034}; 4055};
3035 4056
3036static struct atlas7_pad_mux uart3_nopause_grp0_pad_mux[] = {
3037 MUX(1, 138, 1, 0xa00, 5, 0xa80, 5),
 4057static struct atlas7_pad_mux uart3_rxd_grp2_pad_mux[] = {
 4058 MUX(1, 162, 3, 0xa00, 5, 0xa80, 5),
4059};
4060
4061static struct atlas7_grp_mux uart3_rxd_grp2_mux = {
4062 .pad_mux_count = ARRAY_SIZE(uart3_rxd_grp2_pad_mux),
4063 .pad_mux_list = uart3_rxd_grp2_pad_mux,
4064};
4065
4066static struct atlas7_pad_mux uart3_txd_grp0_pad_mux[] = {
3038 MUX(1, 137, 1, N, N, N, N), 4067 MUX(1, 137, 1, N, N, N, N),
3039}; 4068};
3040 4069
3041static struct atlas7_grp_mux uart3_nopause_grp0_mux = {
3042 .pad_mux_count = ARRAY_SIZE(uart3_nopause_grp0_pad_mux),
3043 .pad_mux_list = uart3_nopause_grp0_pad_mux,
 4070static struct atlas7_grp_mux uart3_txd_grp0_mux = {
 4071 .pad_mux_count = ARRAY_SIZE(uart3_txd_grp0_pad_mux),
 4072 .pad_mux_list = uart3_txd_grp0_pad_mux,
3044}; 4073};
3045 4074
3046static struct atlas7_pad_mux uart3_nopause_grp1_pad_mux[] = {
 4075static struct atlas7_pad_mux uart3_txd_grp1_pad_mux[] = {
3047 MUX(1, 84, 2, 0xa00, 5, 0xa80, 5),
3048 MUX(1, 83, 2, N, N, N, N), 4076 MUX(1, 83, 2, N, N, N, N),
3049}; 4077};
3050 4078
3051static struct atlas7_grp_mux uart3_nopause_grp1_mux = {
3052 .pad_mux_count = ARRAY_SIZE(uart3_nopause_grp1_pad_mux),
3053 .pad_mux_list = uart3_nopause_grp1_pad_mux,
 4079static struct atlas7_grp_mux uart3_txd_grp1_mux = {
 4080 .pad_mux_count = ARRAY_SIZE(uart3_txd_grp1_pad_mux),
 4081 .pad_mux_list = uart3_txd_grp1_pad_mux,
3054}; 4082};
3055 4083
3056static struct atlas7_pad_mux uart4_grp0_pad_mux[] = {
3057 MUX(1, 122, 4, 0xa08, 1, 0xa88, 1),
3058 MUX(1, 123, 4, N, N, N, N),
 4084static struct atlas7_pad_mux uart3_txd_grp2_pad_mux[] = {
 4085 MUX(1, 161, 3, N, N, N, N),
 4086};
4087
4088static struct atlas7_grp_mux uart3_txd_grp2_mux = {
4089 .pad_mux_count = ARRAY_SIZE(uart3_txd_grp2_pad_mux),
4090 .pad_mux_list = uart3_txd_grp2_pad_mux,
4091};
4092
4093static struct atlas7_pad_mux uart4_basic_grp_pad_mux[] = {
3059 MUX(1, 140, 1, N, N, N, N), 4094 MUX(1, 140, 1, N, N, N, N),
3060 MUX(1, 139, 1, N, N, N, N), 4095 MUX(1, 139, 1, N, N, N, N),
3061}; 4096};
3062 4097
3063static struct atlas7_grp_mux uart4_grp0_mux = {
3064 .pad_mux_count = ARRAY_SIZE(uart4_grp0_pad_mux),
3065 .pad_mux_list = uart4_grp0_pad_mux,
 4098static struct atlas7_grp_mux uart4_basic_grp_mux = {
 4099 .pad_mux_count = ARRAY_SIZE(uart4_basic_grp_pad_mux),
 4100 .pad_mux_list = uart4_basic_grp_pad_mux,
4101};
4102
4103static struct atlas7_pad_mux uart4_cts_grp0_pad_mux[] = {
4104 MUX(1, 122, 4, 0xa08, 1, 0xa88, 1),
4105};
4106
4107static struct atlas7_grp_mux uart4_cts_grp0_mux = {
4108 .pad_mux_count = ARRAY_SIZE(uart4_cts_grp0_pad_mux),
4109 .pad_mux_list = uart4_cts_grp0_pad_mux,
3066}; 4110};
3067 4111
3068static struct atlas7_pad_mux uart4_grp1_pad_mux[] = {
 4112static struct atlas7_pad_mux uart4_cts_grp1_pad_mux[] = {
3069 MUX(1, 100, 4, 0xa08, 1, 0xa88, 1), 4113 MUX(1, 100, 4, 0xa08, 1, 0xa88, 1),
3070 MUX(1, 99, 4, N, N, N, N),
3071 MUX(1, 140, 1, N, N, N, N),
3072 MUX(1, 139, 1, N, N, N, N),
3073}; 4114};
3074 4115
3075static struct atlas7_grp_mux uart4_grp1_mux = {
3076 .pad_mux_count = ARRAY_SIZE(uart4_grp1_pad_mux),
3077 .pad_mux_list = uart4_grp1_pad_mux,
 4116static struct atlas7_grp_mux uart4_cts_grp1_mux = {
 4117 .pad_mux_count = ARRAY_SIZE(uart4_cts_grp1_pad_mux),
 4118 .pad_mux_list = uart4_cts_grp1_pad_mux,
3078}; 4119};
3079 4120
3080static struct atlas7_pad_mux uart4_grp2_pad_mux[] = {
 4121static struct atlas7_pad_mux uart4_cts_grp2_pad_mux[] = {
3081 MUX(1, 117, 2, 0xa08, 1, 0xa88, 1), 4122 MUX(1, 117, 2, 0xa08, 1, 0xa88, 1),
3082 MUX(1, 116, 2, N, N, N, N),
3083 MUX(1, 140, 1, N, N, N, N),
3084 MUX(1, 139, 1, N, N, N, N),
3085}; 4123};
3086 4124
3087static struct atlas7_grp_mux uart4_grp2_mux = {
3088 .pad_mux_count = ARRAY_SIZE(uart4_grp2_pad_mux),
3089 .pad_mux_list = uart4_grp2_pad_mux,
 4125static struct atlas7_grp_mux uart4_cts_grp2_mux = {
 4126 .pad_mux_count = ARRAY_SIZE(uart4_cts_grp2_pad_mux),
 4127 .pad_mux_list = uart4_cts_grp2_pad_mux,
3090}; 4128};
3091 4129
3092static struct atlas7_pad_mux uart4_nopause_grp_pad_mux[] = {
3093 MUX(1, 140, 1, N, N, N, N),
 4130static struct atlas7_pad_mux uart4_rts_grp0_pad_mux[] = {
 4131 MUX(1, 123, 4, N, N, N, N),
3094 MUX(1, 139, 1, N, N, N, N),
3095}; 4132};
3096 4133
3097static struct atlas7_grp_mux uart4_nopause_grp_mux = {
3098 .pad_mux_count = ARRAY_SIZE(uart4_nopause_grp_pad_mux),
3099 .pad_mux_list = uart4_nopause_grp_pad_mux,
 4134static struct atlas7_grp_mux uart4_rts_grp0_mux = {
 4135 .pad_mux_count = ARRAY_SIZE(uart4_rts_grp0_pad_mux),
 4136 .pad_mux_list = uart4_rts_grp0_pad_mux,
3100}; 4137};
3101 4138
3102static struct atlas7_pad_mux usb0_drvvbus_grp_pad_mux[] = {
 4139static struct atlas7_pad_mux uart4_rts_grp1_pad_mux[] = {
4140 MUX(1, 99, 4, N, N, N, N),
4141};
4142
4143static struct atlas7_grp_mux uart4_rts_grp1_mux = {
4144 .pad_mux_count = ARRAY_SIZE(uart4_rts_grp1_pad_mux),
4145 .pad_mux_list = uart4_rts_grp1_pad_mux,
4146};
4147
4148static struct atlas7_pad_mux uart4_rts_grp2_pad_mux[] = {
4149 MUX(1, 116, 2, N, N, N, N),
4150};
4151
4152static struct atlas7_grp_mux uart4_rts_grp2_mux = {
4153 .pad_mux_count = ARRAY_SIZE(uart4_rts_grp2_pad_mux),
4154 .pad_mux_list = uart4_rts_grp2_pad_mux,
4155};
4156
4157static struct atlas7_pad_mux usb0_drvvbus_grp0_pad_mux[] = {
3103 MUX(1, 51, 2, N, N, N, N), 4158 MUX(1, 51, 2, N, N, N, N),
3104}; 4159};
3105 4160
3106static struct atlas7_grp_mux usb0_drvvbus_grp_mux = {
3107 .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp_pad_mux),
3108 .pad_mux_list = usb0_drvvbus_grp_pad_mux,
 4161static struct atlas7_grp_mux usb0_drvvbus_grp0_mux = {
 4162 .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp0_pad_mux),
 4163 .pad_mux_list = usb0_drvvbus_grp0_pad_mux,
4164};
4165
4166static struct atlas7_pad_mux usb0_drvvbus_grp1_pad_mux[] = {
4167 MUX(1, 162, 7, N, N, N, N),
3109}; 4168};
3110 4169
3111static struct atlas7_pad_mux usb1_drvvbus_grp_pad_mux[] = {
 4170static struct atlas7_grp_mux usb0_drvvbus_grp1_mux = {
4171 .pad_mux_count = ARRAY_SIZE(usb0_drvvbus_grp1_pad_mux),
4172 .pad_mux_list = usb0_drvvbus_grp1_pad_mux,
4173};
4174
4175static struct atlas7_pad_mux usb1_drvvbus_grp0_pad_mux[] = {
3112 MUX(1, 134, 2, N, N, N, N), 4176 MUX(1, 134, 2, N, N, N, N),
3113}; 4177};
3114 4178
3115static struct atlas7_grp_mux usb1_drvvbus_grp_mux = {
3116 .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp_pad_mux),
3117 .pad_mux_list = usb1_drvvbus_grp_pad_mux,
 4179static struct atlas7_grp_mux usb1_drvvbus_grp0_mux = {
 4180 .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp0_pad_mux),
 4181 .pad_mux_list = usb1_drvvbus_grp0_pad_mux,
4182};
4183
4184static struct atlas7_pad_mux usb1_drvvbus_grp1_pad_mux[] = {
4185 MUX(1, 163, 2, N, N, N, N),
4186};
4187
4188static struct atlas7_grp_mux usb1_drvvbus_grp1_mux = {
4189 .pad_mux_count = ARRAY_SIZE(usb1_drvvbus_grp1_pad_mux),
4190 .pad_mux_list = usb1_drvvbus_grp1_pad_mux,
3118}; 4191};
3119 4192
3120static struct atlas7_pad_mux visbus_dout_grp_pad_mux[] = { 4193static struct atlas7_pad_mux visbus_dout_grp_pad_mux[] = {
@@ -3252,11 +4325,20 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
3252 FUNCTION("sdio_i2s_gpio", sdio_i2s_gpio_grp, &sdio_i2s_gpio_grp_mux), 4325 FUNCTION("sdio_i2s_gpio", sdio_i2s_gpio_grp, &sdio_i2s_gpio_grp_mux),
3253 FUNCTION("sp_rgmii_gpio", sp_rgmii_gpio_grp, &sp_rgmii_gpio_grp_mux), 4326 FUNCTION("sp_rgmii_gpio", sp_rgmii_gpio_grp, &sp_rgmii_gpio_grp_mux),
3254 FUNCTION("lvds_gpio", lvds_gpio_grp, &lvds_gpio_grp_mux), 4327 FUNCTION("lvds_gpio", lvds_gpio_grp, &lvds_gpio_grp_mux),
3255 FUNCTION("uart_nand_gpio",
3256 uart_nand_gpio_grp,
3257 &uart_nand_gpio_grp_mux),
 4328 FUNCTION("jtag_uart_nand_gpio",
 4329 jtag_uart_nand_gpio_grp,
 4330 &jtag_uart_nand_gpio_grp_mux),
3258 FUNCTION("rtc_gpio", rtc_gpio_grp, &rtc_gpio_grp_mux), 4331 FUNCTION("rtc_gpio", rtc_gpio_grp, &rtc_gpio_grp_mux),
3259 FUNCTION("audio_ac97", audio_ac97_grp, &audio_ac97_grp_mux), 4332 FUNCTION("audio_ac97", audio_ac97_grp, &audio_ac97_grp_mux),
4333 FUNCTION("audio_digmic_m0",
4334 audio_digmic_grp0,
4335 &audio_digmic_grp0_mux),
4336 FUNCTION("audio_digmic_m1",
4337 audio_digmic_grp1,
4338 &audio_digmic_grp1_mux),
4339 FUNCTION("audio_digmic_m2",
4340 audio_digmic_grp2,
4341 &audio_digmic_grp2_mux),
3260 FUNCTION("audio_func_dbg", 4342 FUNCTION("audio_func_dbg",
3261 audio_func_dbg_grp, 4343 audio_func_dbg_grp,
3262 &audio_func_dbg_grp_mux), 4344 &audio_func_dbg_grp_mux),
@@ -3265,16 +4347,119 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
3265 FUNCTION("audio_i2s_extclk", 4347 FUNCTION("audio_i2s_extclk",
3266 audio_i2s_extclk_grp, 4348 audio_i2s_extclk_grp,
3267 &audio_i2s_extclk_grp_mux), 4349 &audio_i2s_extclk_grp_mux),
3268 FUNCTION("audio_uart0", audio_uart0_grp, &audio_uart0_grp_mux),
3269 FUNCTION("audio_uart1", audio_uart1_grp, &audio_uart1_grp_mux),
3270 FUNCTION("audio_uart2_m0", audio_uart2_grp0, &audio_uart2_grp0_mux),
3271 FUNCTION("audio_uart2_m1", audio_uart2_grp1, &audio_uart2_grp1_mux),
3272 FUNCTION("c_can_trnsvr", c_can_trnsvr_grp, &c_can_trnsvr_grp_mux),
3273 FUNCTION("c0_can_m0", c0_can_grp0, &c0_can_grp0_mux),
3274 FUNCTION("c0_can_m1", c0_can_grp1, &c0_can_grp1_mux),
3275 FUNCTION("c1_can_m0", c1_can_grp0, &c1_can_grp0_mux),
3276 FUNCTION("c1_can_m1", c1_can_grp1, &c1_can_grp1_mux),
3277 FUNCTION("c1_can_m2", c1_can_grp2, &c1_can_grp2_mux),
 4350 FUNCTION("audio_spdif_out_m0",
 4351 audio_spdif_out_grp0,
 4352 &audio_spdif_out_grp0_mux),
 4353 FUNCTION("audio_spdif_out_m1",
 4354 audio_spdif_out_grp1,
 4355 &audio_spdif_out_grp1_mux),
 4356 FUNCTION("audio_spdif_out_m2",
 4357 audio_spdif_out_grp2,
 4358 &audio_spdif_out_grp2_mux),
 4359 FUNCTION("audio_uart0_basic",
4360 audio_uart0_basic_grp,
4361 &audio_uart0_basic_grp_mux),
4362 FUNCTION("audio_uart0_urfs_m0",
4363 audio_uart0_urfs_grp0,
4364 &audio_uart0_urfs_grp0_mux),
4365 FUNCTION("audio_uart0_urfs_m1",
4366 audio_uart0_urfs_grp1,
4367 &audio_uart0_urfs_grp1_mux),
4368 FUNCTION("audio_uart0_urfs_m2",
4369 audio_uart0_urfs_grp2,
4370 &audio_uart0_urfs_grp2_mux),
4371 FUNCTION("audio_uart0_urfs_m3",
4372 audio_uart0_urfs_grp3,
4373 &audio_uart0_urfs_grp3_mux),
4374 FUNCTION("audio_uart1_basic",
4375 audio_uart1_basic_grp,
4376 &audio_uart1_basic_grp_mux),
4377 FUNCTION("audio_uart1_urfs_m0",
4378 audio_uart1_urfs_grp0,
4379 &audio_uart1_urfs_grp0_mux),
4380 FUNCTION("audio_uart1_urfs_m1",
4381 audio_uart1_urfs_grp1,
4382 &audio_uart1_urfs_grp1_mux),
4383 FUNCTION("audio_uart1_urfs_m2",
4384 audio_uart1_urfs_grp2,
4385 &audio_uart1_urfs_grp2_mux),
4386 FUNCTION("audio_uart2_urfs_m0",
4387 audio_uart2_urfs_grp0,
4388 &audio_uart2_urfs_grp0_mux),
4389 FUNCTION("audio_uart2_urfs_m1",
4390 audio_uart2_urfs_grp1,
4391 &audio_uart2_urfs_grp1_mux),
4392 FUNCTION("audio_uart2_urfs_m2",
4393 audio_uart2_urfs_grp2,
4394 &audio_uart2_urfs_grp2_mux),
4395 FUNCTION("audio_uart2_urxd_m0",
4396 audio_uart2_urxd_grp0,
4397 &audio_uart2_urxd_grp0_mux),
4398 FUNCTION("audio_uart2_urxd_m1",
4399 audio_uart2_urxd_grp1,
4400 &audio_uart2_urxd_grp1_mux),
4401 FUNCTION("audio_uart2_urxd_m2",
4402 audio_uart2_urxd_grp2,
4403 &audio_uart2_urxd_grp2_mux),
4404 FUNCTION("audio_uart2_usclk_m0",
4405 audio_uart2_usclk_grp0,
4406 &audio_uart2_usclk_grp0_mux),
4407 FUNCTION("audio_uart2_usclk_m1",
4408 audio_uart2_usclk_grp1,
4409 &audio_uart2_usclk_grp1_mux),
4410 FUNCTION("audio_uart2_usclk_m2",
4411 audio_uart2_usclk_grp2,
4412 &audio_uart2_usclk_grp2_mux),
4413 FUNCTION("audio_uart2_utfs_m0",
4414 audio_uart2_utfs_grp0,
4415 &audio_uart2_utfs_grp0_mux),
4416 FUNCTION("audio_uart2_utfs_m1",
4417 audio_uart2_utfs_grp1,
4418 &audio_uart2_utfs_grp1_mux),
4419 FUNCTION("audio_uart2_utfs_m2",
4420 audio_uart2_utfs_grp2,
4421 &audio_uart2_utfs_grp2_mux),
4422 FUNCTION("audio_uart2_utxd_m0",
4423 audio_uart2_utxd_grp0,
4424 &audio_uart2_utxd_grp0_mux),
4425 FUNCTION("audio_uart2_utxd_m1",
4426 audio_uart2_utxd_grp1,
4427 &audio_uart2_utxd_grp1_mux),
4428 FUNCTION("audio_uart2_utxd_m2",
4429 audio_uart2_utxd_grp2,
4430 &audio_uart2_utxd_grp2_mux),
4431 FUNCTION("c_can_trnsvr_en_m0",
4432 c_can_trnsvr_en_grp0,
4433 &c_can_trnsvr_en_grp0_mux),
4434 FUNCTION("c_can_trnsvr_en_m1",
4435 c_can_trnsvr_en_grp1,
4436 &c_can_trnsvr_en_grp1_mux),
4437 FUNCTION("c_can_trnsvr_intr",
4438 c_can_trnsvr_intr_grp,
4439 &c_can_trnsvr_intr_grp_mux),
4440 FUNCTION("c_can_trnsvr_stb_n",
4441 c_can_trnsvr_stb_n_grp,
4442 &c_can_trnsvr_stb_n_grp_mux),
4443 FUNCTION("c0_can_rxd_trnsv0",
4444 c0_can_rxd_trnsv0_grp,
4445 &c0_can_rxd_trnsv0_grp_mux),
4446 FUNCTION("c0_can_rxd_trnsv1",
4447 c0_can_rxd_trnsv1_grp,
4448 &c0_can_rxd_trnsv1_grp_mux),
4449 FUNCTION("c0_can_txd_trnsv0",
4450 c0_can_txd_trnsv0_grp,
4451 &c0_can_txd_trnsv0_grp_mux),
4452 FUNCTION("c0_can_txd_trnsv1",
4453 c0_can_txd_trnsv1_grp,
4454 &c0_can_txd_trnsv1_grp_mux),
4455 FUNCTION("c1_can_rxd_m0", c1_can_rxd_grp0, &c1_can_rxd_grp0_mux),
4456 FUNCTION("c1_can_rxd_m1", c1_can_rxd_grp1, &c1_can_rxd_grp1_mux),
4457 FUNCTION("c1_can_rxd_m2", c1_can_rxd_grp2, &c1_can_rxd_grp2_mux),
4458 FUNCTION("c1_can_rxd_m3", c1_can_rxd_grp3, &c1_can_rxd_grp3_mux),
4459 FUNCTION("c1_can_txd_m0", c1_can_txd_grp0, &c1_can_txd_grp0_mux),
4460 FUNCTION("c1_can_txd_m1", c1_can_txd_grp1, &c1_can_txd_grp1_mux),
4461 FUNCTION("c1_can_txd_m2", c1_can_txd_grp2, &c1_can_txd_grp2_mux),
4462 FUNCTION("c1_can_txd_m3", c1_can_txd_grp3, &c1_can_txd_grp3_mux),
3278 FUNCTION("ca_audio_lpc", ca_audio_lpc_grp, &ca_audio_lpc_grp_mux), 4463 FUNCTION("ca_audio_lpc", ca_audio_lpc_grp, &ca_audio_lpc_grp_mux),
3279 FUNCTION("ca_bt_lpc", ca_bt_lpc_grp, &ca_bt_lpc_grp_mux), 4464 FUNCTION("ca_bt_lpc", ca_bt_lpc_grp, &ca_bt_lpc_grp_mux),
3280 FUNCTION("ca_coex", ca_coex_grp, &ca_coex_grp_mux), 4465 FUNCTION("ca_coex", ca_coex_grp, &ca_coex_grp_mux),
@@ -3377,7 +4562,35 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
3377 &gn_trg_shutdown_grp3_mux), 4562 &gn_trg_shutdown_grp3_mux),
3378 FUNCTION("i2c0", i2c0_grp, &i2c0_grp_mux), 4563 FUNCTION("i2c0", i2c0_grp, &i2c0_grp_mux),
3379 FUNCTION("i2c1", i2c1_grp, &i2c1_grp_mux), 4564 FUNCTION("i2c1", i2c1_grp, &i2c1_grp_mux),
3380 FUNCTION("jtag_m0", jtag_grp0, &jtag_grp0_mux),
 4565 FUNCTION("i2s0", i2s0_grp, &i2s0_grp_mux),
4566 FUNCTION("i2s1_basic", i2s1_basic_grp, &i2s1_basic_grp_mux),
4567 FUNCTION("i2s1_rxd0_m0", i2s1_rxd0_grp0, &i2s1_rxd0_grp0_mux),
4568 FUNCTION("i2s1_rxd0_m1", i2s1_rxd0_grp1, &i2s1_rxd0_grp1_mux),
4569 FUNCTION("i2s1_rxd0_m2", i2s1_rxd0_grp2, &i2s1_rxd0_grp2_mux),
4570 FUNCTION("i2s1_rxd0_m3", i2s1_rxd0_grp3, &i2s1_rxd0_grp3_mux),
4571 FUNCTION("i2s1_rxd0_m4", i2s1_rxd0_grp4, &i2s1_rxd0_grp4_mux),
4572 FUNCTION("i2s1_rxd1_m0", i2s1_rxd1_grp0, &i2s1_rxd1_grp0_mux),
4573 FUNCTION("i2s1_rxd1_m1", i2s1_rxd1_grp1, &i2s1_rxd1_grp1_mux),
4574 FUNCTION("i2s1_rxd1_m2", i2s1_rxd1_grp2, &i2s1_rxd1_grp2_mux),
4575 FUNCTION("i2s1_rxd1_m3", i2s1_rxd1_grp3, &i2s1_rxd1_grp3_mux),
4576 FUNCTION("i2s1_rxd1_m4", i2s1_rxd1_grp4, &i2s1_rxd1_grp4_mux),
4577 FUNCTION("jtag_jt_dbg_nsrst",
4578 jtag_jt_dbg_nsrst_grp,
4579 &jtag_jt_dbg_nsrst_grp_mux),
4580 FUNCTION("jtag_ntrst_m0", jtag_ntrst_grp0, &jtag_ntrst_grp0_mux),
4581 FUNCTION("jtag_ntrst_m1", jtag_ntrst_grp1, &jtag_ntrst_grp1_mux),
4582 FUNCTION("jtag_swdiotms_m0",
4583 jtag_swdiotms_grp0,
4584 &jtag_swdiotms_grp0_mux),
4585 FUNCTION("jtag_swdiotms_m1",
4586 jtag_swdiotms_grp1,
4587 &jtag_swdiotms_grp1_mux),
4588 FUNCTION("jtag_tck_m0", jtag_tck_grp0, &jtag_tck_grp0_mux),
4589 FUNCTION("jtag_tck_m1", jtag_tck_grp1, &jtag_tck_grp1_mux),
4590 FUNCTION("jtag_tdi_m0", jtag_tdi_grp0, &jtag_tdi_grp0_mux),
4591 FUNCTION("jtag_tdi_m1", jtag_tdi_grp1, &jtag_tdi_grp1_mux),
4592 FUNCTION("jtag_tdo_m0", jtag_tdo_grp0, &jtag_tdo_grp0_mux),
4593 FUNCTION("jtag_tdo_m1", jtag_tdo_grp1, &jtag_tdo_grp1_mux),
3381 FUNCTION("ks_kas_spi_m0", ks_kas_spi_grp0, &ks_kas_spi_grp0_mux), 4594 FUNCTION("ks_kas_spi_m0", ks_kas_spi_grp0, &ks_kas_spi_grp0_mux),
3382 FUNCTION("ld_ldd", ld_ldd_grp, &ld_ldd_grp_mux), 4595 FUNCTION("ld_ldd", ld_ldd_grp, &ld_ldd_grp_mux),
3383 FUNCTION("ld_ldd_16bit", ld_ldd_16bit_grp, &ld_ldd_16bit_grp_mux), 4596 FUNCTION("ld_ldd_16bit", ld_ldd_16bit_grp, &ld_ldd_16bit_grp_mux),
@@ -3414,18 +4627,27 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
3414 FUNCTION("pw_cko0_m0", pw_cko0_grp0, &pw_cko0_grp0_mux), 4627 FUNCTION("pw_cko0_m0", pw_cko0_grp0, &pw_cko0_grp0_mux),
3415 FUNCTION("pw_cko0_m1", pw_cko0_grp1, &pw_cko0_grp1_mux), 4628 FUNCTION("pw_cko0_m1", pw_cko0_grp1, &pw_cko0_grp1_mux),
3416 FUNCTION("pw_cko0_m2", pw_cko0_grp2, &pw_cko0_grp2_mux), 4629 FUNCTION("pw_cko0_m2", pw_cko0_grp2, &pw_cko0_grp2_mux),
4630 FUNCTION("pw_cko0_m3", pw_cko0_grp3, &pw_cko0_grp3_mux),
3417 FUNCTION("pw_cko1_m0", pw_cko1_grp0, &pw_cko1_grp0_mux), 4631 FUNCTION("pw_cko1_m0", pw_cko1_grp0, &pw_cko1_grp0_mux),
3418 FUNCTION("pw_cko1_m1", pw_cko1_grp1, &pw_cko1_grp1_mux), 4632 FUNCTION("pw_cko1_m1", pw_cko1_grp1, &pw_cko1_grp1_mux),
4633 FUNCTION("pw_cko1_m2", pw_cko1_grp2, &pw_cko1_grp2_mux),
3419 FUNCTION("pw_i2s01_clk_m0", 4634 FUNCTION("pw_i2s01_clk_m0",
3420 pw_i2s01_clk_grp0, 4635 pw_i2s01_clk_grp0,
3421 &pw_i2s01_clk_grp0_mux), 4636 &pw_i2s01_clk_grp0_mux),
3422 FUNCTION("pw_i2s01_clk_m1", 4637 FUNCTION("pw_i2s01_clk_m1",
3423 pw_i2s01_clk_grp1, 4638 pw_i2s01_clk_grp1,
3424 &pw_i2s01_clk_grp1_mux), 4639 &pw_i2s01_clk_grp1_mux),
3425 FUNCTION("pw_pwm0", pw_pwm0_grp, &pw_pwm0_grp_mux),
3426 FUNCTION("pw_pwm1", pw_pwm1_grp, &pw_pwm1_grp_mux),
 4640 FUNCTION("pw_i2s01_clk_m2",
 4641 pw_i2s01_clk_grp2,
4642 &pw_i2s01_clk_grp2_mux),
4643 FUNCTION("pw_pwm0_m0", pw_pwm0_grp0, &pw_pwm0_grp0_mux),
4644 FUNCTION("pw_pwm0_m1", pw_pwm0_grp1, &pw_pwm0_grp1_mux),
4645 FUNCTION("pw_pwm1_m0", pw_pwm1_grp0, &pw_pwm1_grp0_mux),
4646 FUNCTION("pw_pwm1_m1", pw_pwm1_grp1, &pw_pwm1_grp1_mux),
4647 FUNCTION("pw_pwm1_m2", pw_pwm1_grp2, &pw_pwm1_grp2_mux),
3427 FUNCTION("pw_pwm2_m0", pw_pwm2_grp0, &pw_pwm2_grp0_mux), 4648 FUNCTION("pw_pwm2_m0", pw_pwm2_grp0, &pw_pwm2_grp0_mux),
3428 FUNCTION("pw_pwm2_m1", pw_pwm2_grp1, &pw_pwm2_grp1_mux), 4649 FUNCTION("pw_pwm2_m1", pw_pwm2_grp1, &pw_pwm2_grp1_mux),
4650 FUNCTION("pw_pwm2_m2", pw_pwm2_grp2, &pw_pwm2_grp2_mux),
3429 FUNCTION("pw_pwm3_m0", pw_pwm3_grp0, &pw_pwm3_grp0_mux), 4651 FUNCTION("pw_pwm3_m0", pw_pwm3_grp0, &pw_pwm3_grp0_mux),
3430 FUNCTION("pw_pwm3_m1", pw_pwm3_grp1, &pw_pwm3_grp1_mux), 4652 FUNCTION("pw_pwm3_m1", pw_pwm3_grp1, &pw_pwm3_grp1_mux),
3431 FUNCTION("pw_pwm_cpu_vol_m0", 4653 FUNCTION("pw_pwm_cpu_vol_m0",
@@ -3434,6 +4656,9 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
3434 FUNCTION("pw_pwm_cpu_vol_m1", 4656 FUNCTION("pw_pwm_cpu_vol_m1",
3435 pw_pwm_cpu_vol_grp1, 4657 pw_pwm_cpu_vol_grp1,
3436 &pw_pwm_cpu_vol_grp1_mux), 4658 &pw_pwm_cpu_vol_grp1_mux),
4659 FUNCTION("pw_pwm_cpu_vol_m2",
4660 pw_pwm_cpu_vol_grp2,
4661 &pw_pwm_cpu_vol_grp2_mux),
3437 FUNCTION("pw_backlight_m0", 4662 FUNCTION("pw_backlight_m0",
3438 pw_backlight_grp0, 4663 pw_backlight_grp0,
3439 &pw_backlight_grp0_mux), 4664 &pw_backlight_grp0_mux),
@@ -3456,8 +4681,11 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
3456 FUNCTION("sd1", sd1_grp, &sd1_grp_mux), 4681 FUNCTION("sd1", sd1_grp, &sd1_grp_mux),
3457 FUNCTION("sd1_4bit_m0", sd1_4bit_grp0, &sd1_4bit_grp0_mux), 4682 FUNCTION("sd1_4bit_m0", sd1_4bit_grp0, &sd1_4bit_grp0_mux),
3458 FUNCTION("sd1_4bit_m1", sd1_4bit_grp1, &sd1_4bit_grp1_mux), 4683 FUNCTION("sd1_4bit_m1", sd1_4bit_grp1, &sd1_4bit_grp1_mux),
3459 FUNCTION("sd2_m0", sd2_grp0, &sd2_grp0_mux),
3460 FUNCTION("sd2_no_cdb_m0", sd2_no_cdb_grp0, &sd2_no_cdb_grp0_mux),
 4684 FUNCTION("sd2_basic", sd2_basic_grp, &sd2_basic_grp_mux),
 4685 FUNCTION("sd2_cdb_m0", sd2_cdb_grp0, &sd2_cdb_grp0_mux),
4686 FUNCTION("sd2_cdb_m1", sd2_cdb_grp1, &sd2_cdb_grp1_mux),
4687 FUNCTION("sd2_wpb_m0", sd2_wpb_grp0, &sd2_wpb_grp0_mux),
4688 FUNCTION("sd2_wpb_m1", sd2_wpb_grp1, &sd2_wpb_grp1_mux),
3461 FUNCTION("sd3", sd3_grp, &sd3_grp_mux), 4689 FUNCTION("sd3", sd3_grp, &sd3_grp_mux),
3462 FUNCTION("sd5", sd5_grp, &sd5_grp_mux), 4690 FUNCTION("sd5", sd5_grp, &sd5_grp_mux),
3463 FUNCTION("sd6_m0", sd6_grp0, &sd6_grp0_mux), 4691 FUNCTION("sd6_m0", sd6_grp0, &sd6_grp0_mux),
@@ -3471,23 +4699,47 @@ static struct atlas7_pmx_func atlas7_pmx_functions[] = {
3471 FUNCTION("uart0", uart0_grp, &uart0_grp_mux), 4699 FUNCTION("uart0", uart0_grp, &uart0_grp_mux),
3472 FUNCTION("uart0_nopause", uart0_nopause_grp, &uart0_nopause_grp_mux), 4700 FUNCTION("uart0_nopause", uart0_nopause_grp, &uart0_nopause_grp_mux),
3473 FUNCTION("uart1", uart1_grp, &uart1_grp_mux), 4701 FUNCTION("uart1", uart1_grp, &uart1_grp_mux),
3474 FUNCTION("uart2", uart2_grp, &uart2_grp_mux),
3475 FUNCTION("uart3_m0", uart3_grp0, &uart3_grp0_mux),
3476 FUNCTION("uart3_m1", uart3_grp1, &uart3_grp1_mux),
3477 FUNCTION("uart3_m2", uart3_grp2, &uart3_grp2_mux),
3478 FUNCTION("uart3_m3", uart3_grp3, &uart3_grp3_mux),
3479 FUNCTION("uart3_nopause_m0",
3480 uart3_nopause_grp0,
3481 &uart3_nopause_grp0_mux),
3482 FUNCTION("uart3_nopause_m1",
3483 uart3_nopause_grp1,
3484 &uart3_nopause_grp1_mux),
3485 FUNCTION("uart4_m0", uart4_grp0, &uart4_grp0_mux),
3486 FUNCTION("uart4_m1", uart4_grp1, &uart4_grp1_mux),
3487 FUNCTION("uart4_m2", uart4_grp2, &uart4_grp2_mux),
3488 FUNCTION("uart4_nopause", uart4_nopause_grp, &uart4_nopause_grp_mux),
3489 FUNCTION("usb0_drvvbus", usb0_drvvbus_grp, &usb0_drvvbus_grp_mux),
3490 FUNCTION("usb1_drvvbus", usb1_drvvbus_grp, &usb1_drvvbus_grp_mux),
 4702 FUNCTION("uart2_cts_m0", uart2_cts_grp0, &uart2_cts_grp0_mux),
 4703 FUNCTION("uart2_cts_m1", uart2_cts_grp1, &uart2_cts_grp1_mux),
 4704 FUNCTION("uart2_rts_m0", uart2_rts_grp0, &uart2_rts_grp0_mux),
 4705 FUNCTION("uart2_rts_m1", uart2_rts_grp1, &uart2_rts_grp1_mux),
 4706 FUNCTION("uart2_rxd_m0", uart2_rxd_grp0, &uart2_rxd_grp0_mux),
 4707 FUNCTION("uart2_rxd_m1", uart2_rxd_grp1, &uart2_rxd_grp1_mux),
 4708 FUNCTION("uart2_rxd_m2", uart2_rxd_grp2, &uart2_rxd_grp2_mux),
 4709 FUNCTION("uart2_txd_m0", uart2_txd_grp0, &uart2_txd_grp0_mux),
 4710 FUNCTION("uart2_txd_m1", uart2_txd_grp1, &uart2_txd_grp1_mux),
 4711 FUNCTION("uart2_txd_m2", uart2_txd_grp2, &uart2_txd_grp2_mux),
 4712 FUNCTION("uart3_cts_m0", uart3_cts_grp0, &uart3_cts_grp0_mux),
 4713 FUNCTION("uart3_cts_m1", uart3_cts_grp1, &uart3_cts_grp1_mux),
 4714 FUNCTION("uart3_cts_m2", uart3_cts_grp2, &uart3_cts_grp2_mux),
 4715 FUNCTION("uart3_rts_m0", uart3_rts_grp0, &uart3_rts_grp0_mux),
 4716 FUNCTION("uart3_rts_m1", uart3_rts_grp1, &uart3_rts_grp1_mux),
 4717 FUNCTION("uart3_rts_m2", uart3_rts_grp2, &uart3_rts_grp2_mux),
 4718 FUNCTION("uart3_rxd_m0", uart3_rxd_grp0, &uart3_rxd_grp0_mux),
4719 FUNCTION("uart3_rxd_m1", uart3_rxd_grp1, &uart3_rxd_grp1_mux),
4720 FUNCTION("uart3_rxd_m2", uart3_rxd_grp2, &uart3_rxd_grp2_mux),
4721 FUNCTION("uart3_txd_m0", uart3_txd_grp0, &uart3_txd_grp0_mux),
4722 FUNCTION("uart3_txd_m1", uart3_txd_grp1, &uart3_txd_grp1_mux),
4723 FUNCTION("uart3_txd_m2", uart3_txd_grp2, &uart3_txd_grp2_mux),
4724 FUNCTION("uart4_basic", uart4_basic_grp, &uart4_basic_grp_mux),
4725 FUNCTION("uart4_cts_m0", uart4_cts_grp0, &uart4_cts_grp0_mux),
4726 FUNCTION("uart4_cts_m1", uart4_cts_grp1, &uart4_cts_grp1_mux),
4727 FUNCTION("uart4_cts_m2", uart4_cts_grp2, &uart4_cts_grp2_mux),
4728 FUNCTION("uart4_rts_m0", uart4_rts_grp0, &uart4_rts_grp0_mux),
4729 FUNCTION("uart4_rts_m1", uart4_rts_grp1, &uart4_rts_grp1_mux),
4730 FUNCTION("uart4_rts_m2", uart4_rts_grp2, &uart4_rts_grp2_mux),
4731 FUNCTION("usb0_drvvbus_m0",
4732 usb0_drvvbus_grp0,
4733 &usb0_drvvbus_grp0_mux),
4734 FUNCTION("usb0_drvvbus_m1",
4735 usb0_drvvbus_grp1,
4736 &usb0_drvvbus_grp1_mux),
4737 FUNCTION("usb1_drvvbus_m0",
4738 usb1_drvvbus_grp0,
4739 &usb1_drvvbus_grp0_mux),
4740 FUNCTION("usb1_drvvbus_m1",
4741 usb1_drvvbus_grp1,
4742 &usb1_drvvbus_grp1_mux),
3491 FUNCTION("visbus_dout", visbus_dout_grp, &visbus_dout_grp_mux), 4743 FUNCTION("visbus_dout", visbus_dout_grp, &visbus_dout_grp_mux),
3492 FUNCTION("vi_vip1", vi_vip1_grp, &vi_vip1_grp_mux), 4744 FUNCTION("vi_vip1", vi_vip1_grp, &vi_vip1_grp_mux),
3493 FUNCTION("vi_vip1_ext", vi_vip1_ext_grp, &vi_vip1_ext_grp_mux), 4745 FUNCTION("vi_vip1_ext", vi_vip1_ext_grp, &vi_vip1_ext_grp_mux),
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index ae27872ff3a6..e68fd951129a 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -42,6 +42,10 @@ config PINCTRL_SUN8I_A33
42 def_bool MACH_SUN8I 42 def_bool MACH_SUN8I
43 select PINCTRL_SUNXI_COMMON 43 select PINCTRL_SUNXI_COMMON
44 44
45config PINCTRL_SUN8I_A83T
46 def_bool MACH_SUN8I
47 select PINCTRL_SUNXI_COMMON
48
45config PINCTRL_SUN8I_A23_R 49config PINCTRL_SUN8I_A23_R
46 def_bool MACH_SUN8I 50 def_bool MACH_SUN8I
47 depends on RESET_CONTROLLER 51 depends on RESET_CONTROLLER
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 227a1213947c..e08029034510 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -12,4 +12,5 @@ obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
12obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o 12obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
13obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o 13obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o
14obj-$(CONFIG_PINCTRL_SUN8I_A33) += pinctrl-sun8i-a33.o 14obj-$(CONFIG_PINCTRL_SUN8I_A33) += pinctrl-sun8i-a33.o
15obj-$(CONFIG_PINCTRL_SUN8I_A83T) += pinctrl-sun8i-a83t.o
15obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o 16obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index 9596b0a3df6b..d4bc4f0e8be0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -47,45 +47,57 @@ static const struct sunxi_desc_pin sun6i_a31_r_pins[] = {
47 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5), 47 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
48 SUNXI_FUNCTION(0x0, "gpio_in"), 48 SUNXI_FUNCTION(0x0, "gpio_in"),
49 SUNXI_FUNCTION(0x1, "gpio_out"), 49 SUNXI_FUNCTION(0x1, "gpio_out"),
50 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 0), /* PL_EINT0 */
50 SUNXI_FUNCTION(0x3, "s_jtag")), /* MS */ 51 SUNXI_FUNCTION(0x3, "s_jtag")), /* MS */
51 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6), 52 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
52 SUNXI_FUNCTION(0x0, "gpio_in"), 53 SUNXI_FUNCTION(0x0, "gpio_in"),
53 SUNXI_FUNCTION(0x1, "gpio_out"), 54 SUNXI_FUNCTION(0x1, "gpio_out"),
55 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 1), /* PL_EINT1 */
54 SUNXI_FUNCTION(0x3, "s_jtag")), /* CK */ 56 SUNXI_FUNCTION(0x3, "s_jtag")), /* CK */
55 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7), 57 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
56 SUNXI_FUNCTION(0x0, "gpio_in"), 58 SUNXI_FUNCTION(0x0, "gpio_in"),
57 SUNXI_FUNCTION(0x1, "gpio_out"), 59 SUNXI_FUNCTION(0x1, "gpio_out"),
60 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 2), /* PL_EINT2 */
58 SUNXI_FUNCTION(0x3, "s_jtag")), /* DO */ 61 SUNXI_FUNCTION(0x3, "s_jtag")), /* DO */
59 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8), 62 SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
60 SUNXI_FUNCTION(0x0, "gpio_in"), 63 SUNXI_FUNCTION(0x0, "gpio_in"),
61 SUNXI_FUNCTION(0x1, "gpio_out"), 64 SUNXI_FUNCTION(0x1, "gpio_out"),
65 SUNXI_FUNCTION_IRQ_BANK(0x2, 0, 3), /* PL_EINT3 */
62 SUNXI_FUNCTION(0x3, "s_jtag")), /* DI */ 66 SUNXI_FUNCTION(0x3, "s_jtag")), /* DI */
63 /* Hole */ 67 /* Hole */
64 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 0), 68 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 0),
65 SUNXI_FUNCTION(0x0, "gpio_in"), 69 SUNXI_FUNCTION(0x0, "gpio_in"),
66 SUNXI_FUNCTION(0x1, "gpio_out")),
 70 SUNXI_FUNCTION(0x1, "gpio_out"),
71 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 0)), /* PM_EINT0 */
67 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 1), 72 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 1),
68 SUNXI_FUNCTION(0x0, "gpio_in"), 73 SUNXI_FUNCTION(0x0, "gpio_in"),
69 SUNXI_FUNCTION(0x1, "gpio_out")), 74 SUNXI_FUNCTION(0x1, "gpio_out"),
75 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 1)), /* PM_EINT1 */
70 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 2), 76 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 2),
71 SUNXI_FUNCTION(0x0, "gpio_in"), 77 SUNXI_FUNCTION(0x0, "gpio_in"),
72 SUNXI_FUNCTION(0x1, "gpio_out"), 78 SUNXI_FUNCTION(0x1, "gpio_out"),
79 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 2), /* PM_EINT2 */
73 SUNXI_FUNCTION(0x3, "1wire")), 80 SUNXI_FUNCTION(0x3, "1wire")),
74 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 3), 81 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 3),
75 SUNXI_FUNCTION(0x0, "gpio_in"), 82 SUNXI_FUNCTION(0x0, "gpio_in"),
76 SUNXI_FUNCTION(0x1, "gpio_out")),
 83 SUNXI_FUNCTION(0x1, "gpio_out"),
84 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 3)), /* PM_EINT3 */
77 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 4), 85 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 4),
78 SUNXI_FUNCTION(0x0, "gpio_in"), 86 SUNXI_FUNCTION(0x0, "gpio_in"),
79 SUNXI_FUNCTION(0x1, "gpio_out")),
 87 SUNXI_FUNCTION(0x1, "gpio_out"),
88 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 4)), /* PM_EINT4 */
80 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 5), 89 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 5),
81 SUNXI_FUNCTION(0x0, "gpio_in"), 90 SUNXI_FUNCTION(0x0, "gpio_in"),
82 SUNXI_FUNCTION(0x1, "gpio_out")),
 91 SUNXI_FUNCTION(0x1, "gpio_out"),
92 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 5)), /* PM_EINT5 */
83 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 6), 93 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 6),
84 SUNXI_FUNCTION(0x0, "gpio_in"), 94 SUNXI_FUNCTION(0x0, "gpio_in"),
85 SUNXI_FUNCTION(0x1, "gpio_out")),
 95 SUNXI_FUNCTION(0x1, "gpio_out"),
96 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 6)), /* PM_EINT6 */
86 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 7), 97 SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 7),
87 SUNXI_FUNCTION(0x0, "gpio_in"), 98 SUNXI_FUNCTION(0x0, "gpio_in"),
88 SUNXI_FUNCTION(0x1, "gpio_out"), 99 SUNXI_FUNCTION(0x1, "gpio_out"),
100 SUNXI_FUNCTION_IRQ_BANK(0x2, 1, 7), /* PM_EINT7 */
89 SUNXI_FUNCTION(0x3, "rtc")), /* CLKO */ 101 SUNXI_FUNCTION(0x3, "rtc")), /* CLKO */
90}; 102};
91 103
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
new file mode 100644
index 000000000000..90b973e15982
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -0,0 +1,603 @@
1/*
2 * Allwinner a83t SoCs pinctrl driver.
3 *
4 * Copyright (C) 2015 Vishnu Patekar <vishnupatekar0510@gmail.com>
5 *
6 * Based on pinctrl-sun8i-a23.c, which is:
7 * Copyright (C) 2014 Chen-Yu Tsai <wens@csie.org>
8 * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/of.h>
18#include <linux/of_device.h>
19#include <linux/pinctrl/pinctrl.h>
20
21#include "pinctrl-sunxi.h"
22
23static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
24 /* Hole */
25 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
26 SUNXI_FUNCTION(0x0, "gpio_in"),
27 SUNXI_FUNCTION(0x1, "gpio_out"),
28 SUNXI_FUNCTION(0x2, "uart2"), /* TX */
29 SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
30 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PB_EINT0 */
31 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
32 SUNXI_FUNCTION(0x0, "gpio_in"),
33 SUNXI_FUNCTION(0x1, "gpio_out"),
34 SUNXI_FUNCTION(0x2, "uart2"), /* RX */
35 SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
36 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PB_EINT1 */
37 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
38 SUNXI_FUNCTION(0x0, "gpio_in"),
39 SUNXI_FUNCTION(0x1, "gpio_out"),
40 SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
41 SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
42 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PB_EINT2 */
43 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
44 SUNXI_FUNCTION(0x0, "gpio_in"),
45 SUNXI_FUNCTION(0x1, "gpio_out"),
46 SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
47 SUNXI_FUNCTION(0x3, "jtag"), /* DI0 */
48 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PB_EINT3 */
49 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
50 SUNXI_FUNCTION(0x0, "gpio_in"),
51 SUNXI_FUNCTION(0x1, "gpio_out"),
52 SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
53 SUNXI_FUNCTION(0x3, "tdm"), /* LRCK */
54 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PB_EINT4 */
55 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
56 SUNXI_FUNCTION(0x0, "gpio_in"),
57 SUNXI_FUNCTION(0x1, "gpio_out"),
58 SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
59 SUNXI_FUNCTION(0x3, "tdm"), /* BCLK */
60 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PB_EINT5 */
61 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
62 SUNXI_FUNCTION(0x0, "gpio_in"),
63 SUNXI_FUNCTION(0x1, "gpio_out"),
64 SUNXI_FUNCTION(0x2, "i2s0"), /* DOUT */
65 SUNXI_FUNCTION(0x3, "tdm"), /* DOUT */
66 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PB_EINT6 */
67 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
68 SUNXI_FUNCTION(0x0, "gpio_in"),
69 SUNXI_FUNCTION(0x1, "gpio_out"),
70 SUNXI_FUNCTION(0x2, "i2s0"), /* DIN */
71 SUNXI_FUNCTION(0x3, "tdm"), /* DIN */
72 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PB_EINT7 */
73 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
74 SUNXI_FUNCTION(0x0, "gpio_in"),
75 SUNXI_FUNCTION(0x1, "gpio_out"),
76 SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
77 SUNXI_FUNCTION(0x3, "tdm"), /* MCLK */
78 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PB_EINT8 */
79 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
80 SUNXI_FUNCTION(0x0, "gpio_in"),
81 SUNXI_FUNCTION(0x1, "gpio_out"),
82 SUNXI_FUNCTION(0x2, "uart0"), /* TX */
83 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PB_EINT9 */
84 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
85 SUNXI_FUNCTION(0x0, "gpio_in"),
86 SUNXI_FUNCTION(0x1, "gpio_out"),
87 SUNXI_FUNCTION(0x2, "uart0"), /* RX */
88 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PB_EINT10 */
89 /* Hole */
90 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
91 SUNXI_FUNCTION(0x0, "gpio_in"),
92 SUNXI_FUNCTION(0x1, "gpio_out"),
93 SUNXI_FUNCTION(0x2, "nand0"), /* WE */
94 SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
95 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
96 SUNXI_FUNCTION(0x0, "gpio_in"),
97 SUNXI_FUNCTION(0x1, "gpio_out"),
98 SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
99 SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
100 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
101 SUNXI_FUNCTION(0x0, "gpio_in"),
102 SUNXI_FUNCTION(0x1, "gpio_out"),
103 SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
104 SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
105 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
106 SUNXI_FUNCTION(0x0, "gpio_in"),
107 SUNXI_FUNCTION(0x1, "gpio_out"),
108 SUNXI_FUNCTION(0x2, "nand0"), /* CE1 */
109 SUNXI_FUNCTION(0x3, "spi0")), /* CS */
110 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
111 SUNXI_FUNCTION(0x0, "gpio_in"),
112 SUNXI_FUNCTION(0x1, "gpio_out"),
113 SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */
114 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
115 SUNXI_FUNCTION(0x0, "gpio_in"),
116 SUNXI_FUNCTION(0x1, "gpio_out"),
117 SUNXI_FUNCTION(0x2, "nand0"), /* RE */
118 SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
119 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
120 SUNXI_FUNCTION(0x0, "gpio_in"),
121 SUNXI_FUNCTION(0x1, "gpio_out"),
122 SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
123 SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
124 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
125 SUNXI_FUNCTION(0x0, "gpio_in"),
126 SUNXI_FUNCTION(0x1, "gpio_out"),
127 SUNXI_FUNCTION(0x2, "nand0")), /* RB1 */
128 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
129 SUNXI_FUNCTION(0x0, "gpio_in"),
130 SUNXI_FUNCTION(0x1, "gpio_out"),
131 SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
132 SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
133 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
134 SUNXI_FUNCTION(0x0, "gpio_in"),
135 SUNXI_FUNCTION(0x1, "gpio_out"),
136 SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
137 SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
138 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
139 SUNXI_FUNCTION(0x0, "gpio_in"),
140 SUNXI_FUNCTION(0x1, "gpio_out"),
141 SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
142 SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
143 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
144 SUNXI_FUNCTION(0x0, "gpio_in"),
145 SUNXI_FUNCTION(0x1, "gpio_out"),
146 SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
147 SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
148 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
149 SUNXI_FUNCTION(0x0, "gpio_in"),
150 SUNXI_FUNCTION(0x1, "gpio_out"),
151 SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
152 SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
153 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
154 SUNXI_FUNCTION(0x0, "gpio_in"),
155 SUNXI_FUNCTION(0x1, "gpio_out"),
156 SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
157 SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
158 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
159 SUNXI_FUNCTION(0x0, "gpio_in"),
160 SUNXI_FUNCTION(0x1, "gpio_out"),
161 SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */
162 SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
163 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
164 SUNXI_FUNCTION(0x0, "gpio_in"),
165 SUNXI_FUNCTION(0x1, "gpio_out"),
166 SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */
167 SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
168 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
169 SUNXI_FUNCTION(0x0, "gpio_in"),
170 SUNXI_FUNCTION(0x1, "gpio_out"),
171 SUNXI_FUNCTION(0x2, "nand"), /* DQS */
172 SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
173 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
174 SUNXI_FUNCTION(0x0, "gpio_in"),
175 SUNXI_FUNCTION(0x1, "gpio_out"),
176 SUNXI_FUNCTION(0x2, "nand")), /* CE2 */
177 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
178 SUNXI_FUNCTION(0x0, "gpio_in"),
179 SUNXI_FUNCTION(0x1, "gpio_out"),
180 SUNXI_FUNCTION(0x2, "nand")), /* CE3 */
181 /* Hole */
182 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
183 SUNXI_FUNCTION(0x0, "gpio_in"),
184 SUNXI_FUNCTION(0x1, "gpio_out"),
185 SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
186 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII RXD3 */
187 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
188 SUNXI_FUNCTION(0x0, "gpio_in"),
189 SUNXI_FUNCTION(0x1, "gpio_out"),
190 SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
191 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII RXD2 */
192 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
193 SUNXI_FUNCTION(0x0, "gpio_in"),
194 SUNXI_FUNCTION(0x1, "gpio_out"),
195 SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
196 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII RXD1 */
197 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
198 SUNXI_FUNCTION(0x0, "gpio_in"),
199 SUNXI_FUNCTION(0x1, "gpio_out"),
200 SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
201 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII RXD0 */
202 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
203 SUNXI_FUNCTION(0x0, "gpio_in"),
204 SUNXI_FUNCTION(0x1, "gpio_out"),
205 SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
206 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII RXCK */
207 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
208 SUNXI_FUNCTION(0x0, "gpio_in"),
209 SUNXI_FUNCTION(0x1, "gpio_out"),
210 SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
211 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII RXDV */
212 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
213 SUNXI_FUNCTION(0x0, "gpio_in"),
214 SUNXI_FUNCTION(0x1, "gpio_out"),
215 SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
216 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII RXERR */
217 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
218 SUNXI_FUNCTION(0x0, "gpio_in"),
219 SUNXI_FUNCTION(0x1, "gpio_out"),
220 SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
221 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII TXD3 */
222 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
223 SUNXI_FUNCTION(0x0, "gpio_in"),
224 SUNXI_FUNCTION(0x1, "gpio_out"),
225 SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
226 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII TXD2 */
227 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
228 SUNXI_FUNCTION(0x0, "gpio_in"),
229 SUNXI_FUNCTION(0x1, "gpio_out"),
230 SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
231 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII TXD1 */
232 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
233 SUNXI_FUNCTION(0x0, "gpio_in"),
234 SUNXI_FUNCTION(0x1, "gpio_out"),
235 SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
236 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII / MII TXD0 */
237 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
238 SUNXI_FUNCTION(0x0, "gpio_in"),
239 SUNXI_FUNCTION(0x1, "gpio_out"),
240 SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
241 SUNXI_FUNCTION(0x4, "gmac")), /* RGMII-NULL / MII-CRS */
242 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
243 SUNXI_FUNCTION(0x0, "gpio_in"),
244 SUNXI_FUNCTION(0x1, "gpio_out"),
245 SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
246 SUNXI_FUNCTION(0x3, "lvds0"), /* VP0 */
247 SUNXI_FUNCTION(0x4, "gmac")), /* GTXCK / ETXCK */
248 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
249 SUNXI_FUNCTION(0x0, "gpio_in"),
250 SUNXI_FUNCTION(0x1, "gpio_out"),
251 SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
252 SUNXI_FUNCTION(0x3, "lvds0"), /* VN0 */
253 SUNXI_FUNCTION(0x4, "gmac")), /* GTXCTL / ETXEN */
254 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
255 SUNXI_FUNCTION(0x0, "gpio_in"),
256 SUNXI_FUNCTION(0x1, "gpio_out"),
257 SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
258 SUNXI_FUNCTION(0x3, "lvds0"), /* VP1 */
259 SUNXI_FUNCTION(0x4, "gmac")), /* GNULL / ETXERR */
260 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
261 SUNXI_FUNCTION(0x0, "gpio_in"),
262 SUNXI_FUNCTION(0x1, "gpio_out"),
263 SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
264 SUNXI_FUNCTION(0x3, "lvds0"), /* VN1 */
265 SUNXI_FUNCTION(0x4, "gmac")), /* GCLKIN / ECOL */
266 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
267 SUNXI_FUNCTION(0x0, "gpio_in"),
268 SUNXI_FUNCTION(0x1, "gpio_out"),
269 SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
270 SUNXI_FUNCTION(0x3, "lvds0"), /* VP2 */
271 SUNXI_FUNCTION(0x4, "gmac")), /* GMDC */
272 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
273 SUNXI_FUNCTION(0x0, "gpio_in"),
274 SUNXI_FUNCTION(0x1, "gpio_out"),
275 SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
276 SUNXI_FUNCTION(0x3, "lvds0"), /* VN2 */
277 SUNXI_FUNCTION(0x4, "gmac")), /* GMDIO */
278 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
279 SUNXI_FUNCTION(0x0, "gpio_in"),
280 SUNXI_FUNCTION(0x1, "gpio_out"),
281 SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
282 SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
283 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
284 SUNXI_FUNCTION(0x0, "gpio_in"),
285 SUNXI_FUNCTION(0x1, "gpio_out"),
286 SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
287 SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
288 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
289 SUNXI_FUNCTION(0x0, "gpio_in"),
290 SUNXI_FUNCTION(0x1, "gpio_out"),
291 SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
292 SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
293 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
294 SUNXI_FUNCTION(0x0, "gpio_in"),
295 SUNXI_FUNCTION(0x1, "gpio_out"),
296 SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
297 SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */
298 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 28),
299 SUNXI_FUNCTION(0x0, "gpio_in"),
300 SUNXI_FUNCTION(0x1, "gpio_out"),
301 SUNXI_FUNCTION(0x2, "pwm")), /* PWM */
302 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 29),
303 SUNXI_FUNCTION(0x0, "gpio_in"),
304 SUNXI_FUNCTION(0x1, "gpio_out")),
305 /* Hole */
306 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
307 SUNXI_FUNCTION(0x0, "gpio_in"),
308 SUNXI_FUNCTION(0x1, "gpio_out"),
309 SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
310 SUNXI_FUNCTION(0x4, "ccir")), /* CLK */
311 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
312 SUNXI_FUNCTION(0x0, "gpio_in"),
313 SUNXI_FUNCTION(0x1, "gpio_out"),
314 SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
315 SUNXI_FUNCTION(0x4, "ccir")), /* DE */
316 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
317 SUNXI_FUNCTION(0x0, "gpio_in"),
318 SUNXI_FUNCTION(0x1, "gpio_out"),
319 SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
320 SUNXI_FUNCTION(0x4, "ccir")), /* HSYNC */
321 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
322 SUNXI_FUNCTION(0x0, "gpio_in"),
323 SUNXI_FUNCTION(0x1, "gpio_out"),
324 SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
325 SUNXI_FUNCTION(0x4, "ccir")), /* VSYNC */
326 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
327 SUNXI_FUNCTION(0x0, "gpio_in"),
328 SUNXI_FUNCTION(0x1, "gpio_out"),
329 SUNXI_FUNCTION(0x2, "csi")), /* D0 */
330 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
331 SUNXI_FUNCTION(0x0, "gpio_in"),
332 SUNXI_FUNCTION(0x1, "gpio_out"),
333 SUNXI_FUNCTION(0x2, "csi")), /* D1 */
334 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
335 SUNXI_FUNCTION(0x0, "gpio_in"),
336 SUNXI_FUNCTION(0x1, "gpio_out"),
337 SUNXI_FUNCTION(0x2, "csi"), /* D2 */
338 SUNXI_FUNCTION(0x4, "ccir")), /* D0 */
339 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
340 SUNXI_FUNCTION(0x0, "gpio_in"),
341 SUNXI_FUNCTION(0x1, "gpio_out"),
342 SUNXI_FUNCTION(0x2, "csi"), /* D3 */
343 SUNXI_FUNCTION(0x4, "ccir")), /* D1 */
344 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
345 SUNXI_FUNCTION(0x0, "gpio_in"),
346 SUNXI_FUNCTION(0x1, "gpio_out"),
347 SUNXI_FUNCTION(0x2, "csi"), /* D4 */
348 SUNXI_FUNCTION(0x4, "ccir")), /* D2 */
349 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
350 SUNXI_FUNCTION(0x0, "gpio_in"),
351 SUNXI_FUNCTION(0x1, "gpio_out"),
352 SUNXI_FUNCTION(0x2, "csi"), /* D5 */
353 SUNXI_FUNCTION(0x4, "ccir")), /* D3 */
354 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
355 SUNXI_FUNCTION(0x0, "gpio_in"),
356 SUNXI_FUNCTION(0x1, "gpio_out"),
357 SUNXI_FUNCTION(0x2, "csi"), /* D6 */
358 SUNXI_FUNCTION(0x3, "uart4"), /* TX */
359 SUNXI_FUNCTION(0x4, "ccir")), /* D4 */
360 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
361 SUNXI_FUNCTION(0x0, "gpio_in"),
362 SUNXI_FUNCTION(0x1, "gpio_out"),
363 SUNXI_FUNCTION(0x2, "csi"), /* D7 */
364 SUNXI_FUNCTION(0x3, "uart4"), /* RX */
365 SUNXI_FUNCTION(0x4, "ccir")), /* D5 */
366 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
367 SUNXI_FUNCTION(0x0, "gpio_in"),
368 SUNXI_FUNCTION(0x1, "gpio_out"),
369 SUNXI_FUNCTION(0x2, "csi"), /* D8 */
370 SUNXI_FUNCTION(0x3, "uart4"), /* RTS */
371 SUNXI_FUNCTION(0x4, "ccir")), /* D6 */
372 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
373 SUNXI_FUNCTION(0x0, "gpio_in"),
374 SUNXI_FUNCTION(0x1, "gpio_out"),
375 SUNXI_FUNCTION(0x2, "csi"), /* D9 */
376 SUNXI_FUNCTION(0x3, "uart4"), /* CTS */
377 SUNXI_FUNCTION(0x4, "ccir")), /* D7 */
378 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
379 SUNXI_FUNCTION(0x0, "gpio_in"),
380 SUNXI_FUNCTION(0x1, "gpio_out"),
381 SUNXI_FUNCTION(0x2, "csi"), /* SCK */
382 SUNXI_FUNCTION(0x3, "i2c2")), /* SCK */
383 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
384 SUNXI_FUNCTION(0x0, "gpio_in"),
385 SUNXI_FUNCTION(0x1, "gpio_out"),
386 SUNXI_FUNCTION(0x2, "csi"), /* SDA */
387 SUNXI_FUNCTION(0x3, "i2c2")), /* SDA */
388 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
389 SUNXI_FUNCTION(0x0, "gpio_in"),
390 SUNXI_FUNCTION(0x1, "gpio_out")),
391 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
392 SUNXI_FUNCTION(0x0, "gpio_in"),
393 SUNXI_FUNCTION(0x1, "gpio_out")),
394 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
395 SUNXI_FUNCTION(0x0, "gpio_in"),
396 SUNXI_FUNCTION(0x1, "gpio_out"),
397 SUNXI_FUNCTION(0x3, "owa")), /* DOUT */
398 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
399 SUNXI_FUNCTION(0x0, "gpio_in"),
400 SUNXI_FUNCTION(0x1, "gpio_out")),
401 /* Hole */
402 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
403 SUNXI_FUNCTION(0x0, "gpio_in"),
404 SUNXI_FUNCTION(0x1, "gpio_out"),
405 SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
406 SUNXI_FUNCTION(0x3, "jtag")), /* MS1 */
407 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
408 SUNXI_FUNCTION(0x0, "gpio_in"),
409 SUNXI_FUNCTION(0x1, "gpio_out"),
410 SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
411 SUNXI_FUNCTION(0x3, "jtag")), /* DI1 */
412 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
413 SUNXI_FUNCTION(0x0, "gpio_in"),
414 SUNXI_FUNCTION(0x1, "gpio_out"),
415 SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
416 SUNXI_FUNCTION(0x3, "uart0")), /* TX */
417 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
418 SUNXI_FUNCTION(0x0, "gpio_in"),
419 SUNXI_FUNCTION(0x1, "gpio_out"),
420 SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
421 SUNXI_FUNCTION(0x3, "jtag")), /* DO1 */
422 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
423 SUNXI_FUNCTION(0x0, "gpio_in"),
424 SUNXI_FUNCTION(0x1, "gpio_out"),
425 SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
426 SUNXI_FUNCTION(0x3, "uart0")), /* RX */
427 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
428 SUNXI_FUNCTION(0x0, "gpio_in"),
429 SUNXI_FUNCTION(0x1, "gpio_out"),
430 SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
431 SUNXI_FUNCTION(0x3, "jtag")), /* CK1 */
432 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
433 SUNXI_FUNCTION(0x0, "gpio_in"),
434 SUNXI_FUNCTION(0x1, "gpio_out")),
435 /* Hole */
436 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
437 SUNXI_FUNCTION(0x0, "gpio_in"),
438 SUNXI_FUNCTION(0x1, "gpio_out"),
439 SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
440 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PG_EINT0 */
441 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
442 SUNXI_FUNCTION(0x0, "gpio_in"),
443 SUNXI_FUNCTION(0x1, "gpio_out"),
444 SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
445 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PG_EINT1 */
446 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
447 SUNXI_FUNCTION(0x0, "gpio_in"),
448 SUNXI_FUNCTION(0x1, "gpio_out"),
449 SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
450 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PG_EINT2 */
451 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
452 SUNXI_FUNCTION(0x0, "gpio_in"),
453 SUNXI_FUNCTION(0x1, "gpio_out"),
454 SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
455 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PG_EINT3 */
456 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
457 SUNXI_FUNCTION(0x0, "gpio_in"),
458 SUNXI_FUNCTION(0x1, "gpio_out"),
459 SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
460 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PG_EINT4 */
461 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
462 SUNXI_FUNCTION(0x0, "gpio_in"),
463 SUNXI_FUNCTION(0x1, "gpio_out"),
464 SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
465 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PG_EINT5 */
466 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
467 SUNXI_FUNCTION(0x0, "gpio_in"),
468 SUNXI_FUNCTION(0x1, "gpio_out"),
469 SUNXI_FUNCTION(0x2, "uart1"), /* TX */
470 SUNXI_FUNCTION(0x3, "spi1"), /* CS */
471 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PG_EINT6 */
472 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
473 SUNXI_FUNCTION(0x0, "gpio_in"),
474 SUNXI_FUNCTION(0x1, "gpio_out"),
475 SUNXI_FUNCTION(0x2, "uart1"), /* RX */
476 SUNXI_FUNCTION(0x3, "spi1"), /* CLK */
477 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* PG_EINT7 */
478 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
479 SUNXI_FUNCTION(0x0, "gpio_in"),
480 SUNXI_FUNCTION(0x1, "gpio_out"),
481 SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
482 SUNXI_FUNCTION(0x3, "spi1"), /* MOSI */
483 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)), /* PG_EINT8 */
484 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
485 SUNXI_FUNCTION(0x0, "gpio_in"),
486 SUNXI_FUNCTION(0x1, "gpio_out"),
487 SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
488 SUNXI_FUNCTION(0x3, "spi1"), /* MISO */
489 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)), /* PG_EINT9 */
490 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
491 SUNXI_FUNCTION(0x0, "gpio_in"),
492 SUNXI_FUNCTION(0x1, "gpio_out"),
493 SUNXI_FUNCTION(0x2, "i2s1"), /* BCLK */
494 SUNXI_FUNCTION(0x3, "uart3"), /* TX */
495 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 10)), /* PG_EINT10 */
496 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
497 SUNXI_FUNCTION(0x0, "gpio_in"),
498 SUNXI_FUNCTION(0x1, "gpio_out"),
499 SUNXI_FUNCTION(0x2, "i2s1"), /* LRCK */
500 SUNXI_FUNCTION(0x3, "uart3"), /* RX */
501 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 11)), /* PG_EINT11 */
502 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
503 SUNXI_FUNCTION(0x0, "gpio_in"),
504 SUNXI_FUNCTION(0x1, "gpio_out"),
505 SUNXI_FUNCTION(0x2, "i2s1"), /* DOUT */
506 SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
507 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 12)), /* PG_EINT12 */
508 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
509 SUNXI_FUNCTION(0x0, "gpio_in"),
510 SUNXI_FUNCTION(0x1, "gpio_out"),
511 SUNXI_FUNCTION(0x2, "i2s1"), /* DIN */
512 SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
513 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 13)), /* PG_EINT13 */
514 /* Hole */
515 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
516 SUNXI_FUNCTION(0x0, "gpio_in"),
517 SUNXI_FUNCTION(0x1, "gpio_out"),
518 SUNXI_FUNCTION(0x2, "i2c0"), /* SCK */
519 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PH_EINT0 */
520 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
521 SUNXI_FUNCTION(0x0, "gpio_in"),
522 SUNXI_FUNCTION(0x1, "gpio_out"),
523 SUNXI_FUNCTION(0x2, "i2c0"), /* SDA */
524 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT1 */
525 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
526 SUNXI_FUNCTION(0x0, "gpio_in"),
527 SUNXI_FUNCTION(0x1, "gpio_out"),
528 SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
529 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PH_EINT2 */
530 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
531 SUNXI_FUNCTION(0x0, "gpio_in"),
532 SUNXI_FUNCTION(0x1, "gpio_out"),
533 SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
534 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PH_EINT3 */
535 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
536 SUNXI_FUNCTION(0x0, "gpio_in"),
537 SUNXI_FUNCTION(0x1, "gpio_out"),
538 SUNXI_FUNCTION(0x2, "i2c2"), /* SCK */
539 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PH_EINT4 */
540 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
541 SUNXI_FUNCTION(0x0, "gpio_in"),
542 SUNXI_FUNCTION(0x1, "gpio_out"),
543 SUNXI_FUNCTION(0x2, "i2c2"), /* SDA */
544 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PH_EINT5 */
545 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
546 SUNXI_FUNCTION(0x0, "gpio_in"),
547 SUNXI_FUNCTION(0x1, "gpio_out"),
548 SUNXI_FUNCTION(0x2, "hdmi"), /* HSCL */
549 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PH_EINT6 */
550 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
551 SUNXI_FUNCTION(0x0, "gpio_in"),
552 SUNXI_FUNCTION(0x1, "gpio_out"),
553 SUNXI_FUNCTION(0x2, "hdmi"), /* HSDA */
554 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PH_EINT7 */
555 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
556 SUNXI_FUNCTION(0x0, "gpio_in"),
557 SUNXI_FUNCTION(0x1, "gpio_out"),
558 SUNXI_FUNCTION(0x2, "hdmi"), /* HCEC */
559 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PH_EINT8 */
560 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
561 SUNXI_FUNCTION(0x0, "gpio_in"),
562 SUNXI_FUNCTION(0x1, "gpio_out"),
563 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PH_EINT9 */
564 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
565 SUNXI_FUNCTION(0x0, "gpio_in"),
566 SUNXI_FUNCTION(0x1, "gpio_out"),
567 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PH_EINT10 */
568 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
569 SUNXI_FUNCTION(0x0, "gpio_in"),
570 SUNXI_FUNCTION(0x1, "gpio_out"),
571 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
572};
573
574static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
575 .pins = sun8i_a83t_pins,
576 .npins = ARRAY_SIZE(sun8i_a83t_pins),
577 .irq_banks = 3,
578};
579
580static int sun8i_a83t_pinctrl_probe(struct platform_device *pdev)
581{
582 return sunxi_pinctrl_init(pdev,
583 &sun8i_a83t_pinctrl_data);
584}
585
586static const struct of_device_id sun8i_a83t_pinctrl_match[] = {
587 { .compatible = "allwinner,sun8i-a83t-pinctrl", },
588 {}
589};
590MODULE_DEVICE_TABLE(of, sun8i_a83t_pinctrl_match);
591
592static struct platform_driver sun8i_a83t_pinctrl_driver = {
593 .probe = sun8i_a83t_pinctrl_probe,
594 .driver = {
595 .name = "sun8i-a83t-pinctrl",
596 .of_match_table = sun8i_a83t_pinctrl_match,
597 },
598};
599module_platform_driver(sun8i_a83t_pinctrl_driver);
600
601MODULE_AUTHOR("Vishnu Patekar <vishnupatekar0510@gmail.com>");
602MODULE_DESCRIPTION("Allwinner A83T pinctrl driver");
603MODULE_LICENSE("GPL");
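
The SUNXI_FUNCTION_IRQ_BANK(mux, bank, offset) entries in the table above map each EINT-capable pin onto a flat external-interrupt number. A minimal standalone sketch of that mapping, assuming the usual sunxi convention of 32 interrupts per register bank (the constant below is an assumption for illustration, not taken from this file):

#include <stdio.h>

#define IRQ_PER_BANK	32	/* assumed sunxi convention */

/* Flatten a (bank, offset) pair; e.g. PH_EINT11 above is bank 2, offset 11 */
static unsigned int eint_hwirq(unsigned int bank, unsigned int offset)
{
	return bank * IRQ_PER_BANK + offset;
}

int main(void)
{
	printf("PG_EINT0  -> hwirq %u\n", eint_hwirq(1, 0));	/* 32 */
	printf("PH_EINT11 -> hwirq %u\n", eint_hwirq(2, 11));	/* 75 */
	return 0;
}

With three interrupt banks referenced by the pin table, this is consistent with the .irq_banks = 3 field in sun8i_a83t_pinctrl_data above.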
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 38e0c7bdd2ac..dead97daca35 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -446,16 +446,6 @@ static const struct pinmux_ops sunxi_pmx_ops = {
446 .gpio_set_direction = sunxi_pmx_gpio_set_direction, 446 .gpio_set_direction = sunxi_pmx_gpio_set_direction,
447}; 447};
448 448
449static int sunxi_pinctrl_gpio_request(struct gpio_chip *chip, unsigned offset)
450{
451 return pinctrl_request_gpio(chip->base + offset);
452}
453
454static void sunxi_pinctrl_gpio_free(struct gpio_chip *chip, unsigned offset)
455{
456 pinctrl_free_gpio(chip->base + offset);
457}
458
459static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip, 449static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
460 unsigned offset) 450 unsigned offset)
461{ 451{
@@ -716,6 +706,7 @@ static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
716 unsigned long *out_hwirq, 706 unsigned long *out_hwirq,
717 unsigned int *out_type) 707 unsigned int *out_type)
718{ 708{
709 struct sunxi_pinctrl *pctl = d->host_data;
719 struct sunxi_desc_function *desc; 710 struct sunxi_desc_function *desc;
720 int pin, base; 711 int pin, base;
721 712
@@ -723,10 +714,9 @@ static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
723 return -EINVAL; 714 return -EINVAL;
724 715
725 base = PINS_PER_BANK * intspec[0]; 716 base = PINS_PER_BANK * intspec[0];
726 pin = base + intspec[1]; 717 pin = pctl->desc->pin_base + base + intspec[1];
727 718
728 desc = sunxi_pinctrl_desc_find_function_by_pin(d->host_data, 719 desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, pin, "irq");
729 pin, "irq");
730 if (!desc) 720 if (!desc)
731 return -EINVAL; 721 return -EINVAL;
732 722
@@ -956,8 +946,8 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
956 946
957 last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number; 947 last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
958 pctl->chip->owner = THIS_MODULE; 948 pctl->chip->owner = THIS_MODULE;
959 pctl->chip->request = sunxi_pinctrl_gpio_request, 949 pctl->chip->request = gpiochip_generic_request,
960 pctl->chip->free = sunxi_pinctrl_gpio_free, 950 pctl->chip->free = gpiochip_generic_free,
961 pctl->chip->direction_input = sunxi_pinctrl_gpio_direction_input, 951 pctl->chip->direction_input = sunxi_pinctrl_gpio_direction_input,
962 pctl->chip->direction_output = sunxi_pinctrl_gpio_direction_output, 952 pctl->chip->direction_output = sunxi_pinctrl_gpio_direction_output,
963 pctl->chip->get = sunxi_pinctrl_gpio_get, 953 pctl->chip->get = sunxi_pinctrl_gpio_get,
@@ -1029,7 +1019,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
1029 irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip, 1019 irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
1030 handle_edge_irq); 1020 handle_edge_irq);
1031 irq_set_chip_data(irqno, pctl); 1021 irq_set_chip_data(irqno, pctl);
1032 }; 1022 }
1033 1023
1034 for (i = 0; i < pctl->desc->irq_banks; i++) { 1024 for (i = 0; i < pctl->desc->irq_banks; i++) {
1035 /* Mask and clear all IRQs before registering a handler */ 1025 /* Mask and clear all IRQs before registering a handler */

diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
index eab23ef9ddbf..ad907072e09f 100644
--- a/drivers/pinctrl/uniphier/Kconfig
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -1,32 +1,32 @@
1if ARCH_UNIPHIER 1if ARCH_UNIPHIER
2 2
3config PINCTRL_UNIPHIER_CORE 3config PINCTRL_UNIPHIER
4 bool 4 bool
5 select PINMUX 5 select PINMUX
6 select GENERIC_PINCONF 6 select GENERIC_PINCONF
7 7
8config PINCTRL_UNIPHIER_PH1_LD4 8config PINCTRL_UNIPHIER_PH1_LD4
9 tristate "UniPhier PH1-LD4 SoC pinctrl driver" 9 tristate "UniPhier PH1-LD4 SoC pinctrl driver"
10 select PINCTRL_UNIPHIER_CORE 10 select PINCTRL_UNIPHIER
11 11
12config PINCTRL_UNIPHIER_PH1_PRO4 12config PINCTRL_UNIPHIER_PH1_PRO4
13 tristate "UniPhier PH1-Pro4 SoC pinctrl driver" 13 tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
14 select PINCTRL_UNIPHIER_CORE 14 select PINCTRL_UNIPHIER
15 15
16config PINCTRL_UNIPHIER_PH1_SLD8 16config PINCTRL_UNIPHIER_PH1_SLD8
17 tristate "UniPhier PH1-sLD8 SoC pinctrl driver" 17 tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
18 select PINCTRL_UNIPHIER_CORE 18 select PINCTRL_UNIPHIER
19 19
20config PINCTRL_UNIPHIER_PH1_PRO5 20config PINCTRL_UNIPHIER_PH1_PRO5
21 tristate "UniPhier PH1-Pro5 SoC pinctrl driver" 21 tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
22 select PINCTRL_UNIPHIER_CORE 22 select PINCTRL_UNIPHIER
23 23
24config PINCTRL_UNIPHIER_PROXSTREAM2 24config PINCTRL_UNIPHIER_PROXSTREAM2
25 tristate "UniPhier ProXstream2 SoC pinctrl driver" 25 tristate "UniPhier ProXstream2 SoC pinctrl driver"
26 select PINCTRL_UNIPHIER_CORE 26 select PINCTRL_UNIPHIER
27 27
28config PINCTRL_UNIPHIER_PH1_LD6B 28config PINCTRL_UNIPHIER_PH1_LD6B
29 tristate "UniPhier PH1-LD6b SoC pinctrl driver" 29 tristate "UniPhier PH1-LD6b SoC pinctrl driver"
30 select PINCTRL_UNIPHIER_CORE 30 select PINCTRL_UNIPHIER
31 31
32endif 32endif
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
index e215b1097297..e7ce9670306c 100644
--- a/drivers/pinctrl/uniphier/Makefile
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_PINCTRL_UNIPHIER_CORE) += pinctrl-uniphier-core.o 1obj-y += pinctrl-uniphier-core.o
2 2
3obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4) += pinctrl-ph1-ld4.o 3obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4) += pinctrl-ph1-ld4.o
4obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4) += pinctrl-ph1-pro4.o 4obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4) += pinctrl-ph1-pro4.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
index 7beb87e8f499..a7056dccfa53 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
@@ -537,6 +537,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
537 0, 0}; 537 0, 0};
538static const unsigned nand_cs1_pins[] = {22, 23}; 538static const unsigned nand_cs1_pins[] = {22, 23};
539static const unsigned nand_cs1_muxvals[] = {0, 0}; 539static const unsigned nand_cs1_muxvals[] = {0, 0};
540static const unsigned sd_pins[] = {44, 45, 46, 47, 48, 49, 50, 51, 52};
541static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
540static const unsigned uart0_pins[] = {85, 88}; 542static const unsigned uart0_pins[] = {85, 88};
541static const unsigned uart0_muxvals[] = {1, 1}; 543static const unsigned uart0_muxvals[] = {1, 1};
542static const unsigned uart1_pins[] = {155, 156}; 544static const unsigned uart1_pins[] = {155, 156};
@@ -619,6 +621,7 @@ static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
619 UNIPHIER_PINCTRL_GROUP(i2c3), 621 UNIPHIER_PINCTRL_GROUP(i2c3),
620 UNIPHIER_PINCTRL_GROUP(nand), 622 UNIPHIER_PINCTRL_GROUP(nand),
621 UNIPHIER_PINCTRL_GROUP(nand_cs1), 623 UNIPHIER_PINCTRL_GROUP(nand_cs1),
624 UNIPHIER_PINCTRL_GROUP(sd),
622 UNIPHIER_PINCTRL_GROUP(uart0), 625 UNIPHIER_PINCTRL_GROUP(uart0),
623 UNIPHIER_PINCTRL_GROUP(uart1), 626 UNIPHIER_PINCTRL_GROUP(uart1),
624 UNIPHIER_PINCTRL_GROUP(uart1b), 627 UNIPHIER_PINCTRL_GROUP(uart1b),
@@ -776,6 +779,7 @@ static const char * const i2c1_groups[] = {"i2c1"};
776static const char * const i2c2_groups[] = {"i2c2"}; 779static const char * const i2c2_groups[] = {"i2c2"};
777static const char * const i2c3_groups[] = {"i2c3"}; 780static const char * const i2c3_groups[] = {"i2c3"};
778static const char * const nand_groups[] = {"nand", "nand_cs1"}; 781static const char * const nand_groups[] = {"nand", "nand_cs1"};
782static const char * const sd_groups[] = {"sd"};
779static const char * const uart0_groups[] = {"uart0"}; 783static const char * const uart0_groups[] = {"uart0"};
780static const char * const uart1_groups[] = {"uart1", "uart1b"}; 784static const char * const uart1_groups[] = {"uart1", "uart1b"};
781static const char * const uart2_groups[] = {"uart2"}; 785static const char * const uart2_groups[] = {"uart2"};
@@ -831,6 +835,7 @@ static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
831 UNIPHIER_PINMUX_FUNCTION(i2c2), 835 UNIPHIER_PINMUX_FUNCTION(i2c2),
832 UNIPHIER_PINMUX_FUNCTION(i2c3), 836 UNIPHIER_PINMUX_FUNCTION(i2c3),
833 UNIPHIER_PINMUX_FUNCTION(nand), 837 UNIPHIER_PINMUX_FUNCTION(nand),
838 UNIPHIER_PINMUX_FUNCTION(sd),
834 UNIPHIER_PINMUX_FUNCTION(uart0), 839 UNIPHIER_PINMUX_FUNCTION(uart0),
835 UNIPHIER_PINMUX_FUNCTION(uart1), 840 UNIPHIER_PINMUX_FUNCTION(uart1),
836 UNIPHIER_PINMUX_FUNCTION(uart2), 841 UNIPHIER_PINMUX_FUNCTION(uart2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
index 9720e697fbc1..1824831bb4da 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
@@ -761,6 +761,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
761 0, 0}; 761 0, 0};
762static const unsigned nand_cs1_pins[] = {37, 38}; 762static const unsigned nand_cs1_pins[] = {37, 38};
763static const unsigned nand_cs1_muxvals[] = {0, 0}; 763static const unsigned nand_cs1_muxvals[] = {0, 0};
764static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
765static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
764static const unsigned uart0_pins[] = {135, 136}; 766static const unsigned uart0_pins[] = {135, 136};
765static const unsigned uart0_muxvals[] = {3, 3}; 767static const unsigned uart0_muxvals[] = {3, 3};
766static const unsigned uart0b_pins[] = {11, 12}; 768static const unsigned uart0b_pins[] = {11, 12};
@@ -866,6 +868,7 @@ static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
866 UNIPHIER_PINCTRL_GROUP(i2c3), 868 UNIPHIER_PINCTRL_GROUP(i2c3),
867 UNIPHIER_PINCTRL_GROUP(nand), 869 UNIPHIER_PINCTRL_GROUP(nand),
868 UNIPHIER_PINCTRL_GROUP(nand_cs1), 870 UNIPHIER_PINCTRL_GROUP(nand_cs1),
871 UNIPHIER_PINCTRL_GROUP(sd),
869 UNIPHIER_PINCTRL_GROUP(uart0), 872 UNIPHIER_PINCTRL_GROUP(uart0),
870 UNIPHIER_PINCTRL_GROUP(uart0b), 873 UNIPHIER_PINCTRL_GROUP(uart0b),
871 UNIPHIER_PINCTRL_GROUP(uart1), 874 UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1136,6 +1139,7 @@ static const char * const i2c1_groups[] = {"i2c1"};
1136static const char * const i2c2_groups[] = {"i2c2"}; 1139static const char * const i2c2_groups[] = {"i2c2"};
1137static const char * const i2c3_groups[] = {"i2c3"}; 1140static const char * const i2c3_groups[] = {"i2c3"};
1138static const char * const nand_groups[] = {"nand", "nand_cs1"}; 1141static const char * const nand_groups[] = {"nand", "nand_cs1"};
1142static const char * const sd_groups[] = {"sd"};
1139static const char * const uart0_groups[] = {"uart0", "uart0b"}; 1143static const char * const uart0_groups[] = {"uart0", "uart0b"};
1140static const char * const uart1_groups[] = {"uart1", "uart1b"}; 1144static const char * const uart1_groups[] = {"uart1", "uart1b"};
1141static const char * const uart2_groups[] = {"uart2", "uart2b"}; 1145static const char * const uart2_groups[] = {"uart2", "uart2b"};
@@ -1219,6 +1223,7 @@ static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
1219 UNIPHIER_PINMUX_FUNCTION(i2c2), 1223 UNIPHIER_PINMUX_FUNCTION(i2c2),
1220 UNIPHIER_PINMUX_FUNCTION(i2c3), 1224 UNIPHIER_PINMUX_FUNCTION(i2c3),
1221 UNIPHIER_PINMUX_FUNCTION(nand), 1225 UNIPHIER_PINMUX_FUNCTION(nand),
1226 UNIPHIER_PINMUX_FUNCTION(sd),
1222 UNIPHIER_PINMUX_FUNCTION(uart0), 1227 UNIPHIER_PINMUX_FUNCTION(uart0),
1223 UNIPHIER_PINMUX_FUNCTION(uart1), 1228 UNIPHIER_PINMUX_FUNCTION(uart1),
1224 UNIPHIER_PINMUX_FUNCTION(uart2), 1229 UNIPHIER_PINMUX_FUNCTION(uart2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
index 96921e40da5f..ec8e92dfaf8c 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
@@ -1031,6 +1031,11 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1031 0, 0}; 1031 0, 0};
1032static const unsigned nand_cs1_pins[] = {131, 132}; 1032static const unsigned nand_cs1_pins[] = {131, 132};
1033static const unsigned nand_cs1_muxvals[] = {1, 1}; 1033static const unsigned nand_cs1_muxvals[] = {1, 1};
1034static const unsigned sd_pins[] = {150, 151, 152, 153, 154, 155, 156, 157, 158};
1035static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
1036static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326,
1037 327};
1038static const unsigned sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
1034static const unsigned uart0_pins[] = {127, 128}; 1039static const unsigned uart0_pins[] = {127, 128};
1035static const unsigned uart0_muxvals[] = {0, 0}; 1040static const unsigned uart0_muxvals[] = {0, 0};
1036static const unsigned uart1_pins[] = {129, 130}; 1041static const unsigned uart1_pins[] = {129, 130};
@@ -1140,6 +1145,8 @@ static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
1140 UNIPHIER_PINCTRL_GROUP(i2c6), 1145 UNIPHIER_PINCTRL_GROUP(i2c6),
1141 UNIPHIER_PINCTRL_GROUP(nand), 1146 UNIPHIER_PINCTRL_GROUP(nand),
1142 UNIPHIER_PINCTRL_GROUP(nand_cs1), 1147 UNIPHIER_PINCTRL_GROUP(nand_cs1),
1148 UNIPHIER_PINCTRL_GROUP(sd),
1149 UNIPHIER_PINCTRL_GROUP(sd1),
1143 UNIPHIER_PINCTRL_GROUP(uart0), 1150 UNIPHIER_PINCTRL_GROUP(uart0),
1144 UNIPHIER_PINCTRL_GROUP(uart1), 1151 UNIPHIER_PINCTRL_GROUP(uart1),
1145 UNIPHIER_PINCTRL_GROUP(uart2), 1152 UNIPHIER_PINCTRL_GROUP(uart2),
@@ -1412,6 +1419,8 @@ static const char * const i2c2_groups[] = {"i2c2"};
1412static const char * const i2c3_groups[] = {"i2c3"}; 1419static const char * const i2c3_groups[] = {"i2c3"};
1413static const char * const i2c6_groups[] = {"i2c6"}; 1420static const char * const i2c6_groups[] = {"i2c6"};
1414static const char * const nand_groups[] = {"nand", "nand_cs1"}; 1421static const char * const nand_groups[] = {"nand", "nand_cs1"};
1422static const char * const sd_groups[] = {"sd"};
1423static const char * const sd1_groups[] = {"sd1"};
1415static const char * const uart0_groups[] = {"uart0"}; 1424static const char * const uart0_groups[] = {"uart0"};
1416static const char * const uart1_groups[] = {"uart1"}; 1425static const char * const uart1_groups[] = {"uart1"};
1417static const char * const uart2_groups[] = {"uart2"}; 1426static const char * const uart2_groups[] = {"uart2"};
@@ -1498,6 +1507,8 @@ static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
1498 UNIPHIER_PINMUX_FUNCTION(i2c3), 1507 UNIPHIER_PINMUX_FUNCTION(i2c3),
1499 UNIPHIER_PINMUX_FUNCTION(i2c6), 1508 UNIPHIER_PINMUX_FUNCTION(i2c6),
1500 UNIPHIER_PINMUX_FUNCTION(nand), 1509 UNIPHIER_PINMUX_FUNCTION(nand),
1510 UNIPHIER_PINMUX_FUNCTION(sd),
1511 UNIPHIER_PINMUX_FUNCTION(sd1),
1501 UNIPHIER_PINMUX_FUNCTION(uart0), 1512 UNIPHIER_PINMUX_FUNCTION(uart0),
1502 UNIPHIER_PINMUX_FUNCTION(uart1), 1513 UNIPHIER_PINMUX_FUNCTION(uart1),
1503 UNIPHIER_PINMUX_FUNCTION(uart2), 1514 UNIPHIER_PINMUX_FUNCTION(uart2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
index 9af455978058..e3d648eae85a 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
@@ -818,6 +818,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
818 0, 0}; 818 0, 0};
819static const unsigned nand_cs1_pins[] = {26, 27}; 819static const unsigned nand_cs1_pins[] = {26, 27};
820static const unsigned nand_cs1_muxvals[] = {0, 0}; 820static const unsigned nand_cs1_muxvals[] = {0, 0};
821static const unsigned sd_pins[] = {250, 251, 252, 253, 254, 255, 256, 257, 258};
822static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
821static const unsigned uart0_pins[] = {47, 48}; 823static const unsigned uart0_pins[] = {47, 48};
822static const unsigned uart0_muxvals[] = {0, 0}; 824static const unsigned uart0_muxvals[] = {0, 0};
823static const unsigned uart0b_pins[] = {227, 228}; 825static const unsigned uart0b_pins[] = {227, 228};
@@ -930,6 +932,7 @@ static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
930 UNIPHIER_PINCTRL_GROUP(i2c5b), 932 UNIPHIER_PINCTRL_GROUP(i2c5b),
931 UNIPHIER_PINCTRL_GROUP(i2c5c), 933 UNIPHIER_PINCTRL_GROUP(i2c5c),
932 UNIPHIER_PINCTRL_GROUP(i2c6), 934 UNIPHIER_PINCTRL_GROUP(i2c6),
935 UNIPHIER_PINCTRL_GROUP(sd),
933 UNIPHIER_PINCTRL_GROUP(uart0), 936 UNIPHIER_PINCTRL_GROUP(uart0),
934 UNIPHIER_PINCTRL_GROUP(uart0b), 937 UNIPHIER_PINCTRL_GROUP(uart0b),
935 UNIPHIER_PINCTRL_GROUP(uart1), 938 UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1209,6 +1212,7 @@ static const char * const i2c3_groups[] = {"i2c3"};
1209static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"}; 1212static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
1210static const char * const i2c6_groups[] = {"i2c6"}; 1213static const char * const i2c6_groups[] = {"i2c6"};
1211static const char * const nand_groups[] = {"nand", "nand_cs1"}; 1214static const char * const nand_groups[] = {"nand", "nand_cs1"};
1215static const char * const sd_groups[] = {"sd"};
1212static const char * const uart0_groups[] = {"uart0", "uart0b"}; 1216static const char * const uart0_groups[] = {"uart0", "uart0b"};
1213static const char * const uart1_groups[] = {"uart1"}; 1217static const char * const uart1_groups[] = {"uart1"};
1214static const char * const uart2_groups[] = {"uart2"}; 1218static const char * const uart2_groups[] = {"uart2"};
@@ -1296,6 +1300,7 @@ static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
1296 UNIPHIER_PINMUX_FUNCTION(i2c5), 1300 UNIPHIER_PINMUX_FUNCTION(i2c5),
1297 UNIPHIER_PINMUX_FUNCTION(i2c6), 1301 UNIPHIER_PINMUX_FUNCTION(i2c6),
1298 UNIPHIER_PINMUX_FUNCTION(nand), 1302 UNIPHIER_PINMUX_FUNCTION(nand),
1303 UNIPHIER_PINMUX_FUNCTION(sd),
1299 UNIPHIER_PINMUX_FUNCTION(uart0), 1304 UNIPHIER_PINMUX_FUNCTION(uart0),
1300 UNIPHIER_PINMUX_FUNCTION(uart1), 1305 UNIPHIER_PINMUX_FUNCTION(uart1),
1301 UNIPHIER_PINMUX_FUNCTION(uart2), 1306 UNIPHIER_PINMUX_FUNCTION(uart2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
index 2df8bbecebfc..c3700a33a5da 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
@@ -450,6 +450,8 @@ static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
450 0, 0}; 450 0, 0};
451static const unsigned nand_cs1_pins[] = {22, 23}; 451static const unsigned nand_cs1_pins[] = {22, 23};
452static const unsigned nand_cs1_muxvals[] = {0, 0}; 452static const unsigned nand_cs1_muxvals[] = {0, 0};
453static const unsigned sd_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40};
454static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
453static const unsigned uart0_pins[] = {70, 71}; 455static const unsigned uart0_pins[] = {70, 71};
454static const unsigned uart0_muxvals[] = {3, 3}; 456static const unsigned uart0_muxvals[] = {3, 3};
455static const unsigned uart1_pins[] = {114, 115}; 457static const unsigned uart1_pins[] = {114, 115};
@@ -536,6 +538,7 @@ static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
536 UNIPHIER_PINCTRL_GROUP(i2c3), 538 UNIPHIER_PINCTRL_GROUP(i2c3),
537 UNIPHIER_PINCTRL_GROUP(nand), 539 UNIPHIER_PINCTRL_GROUP(nand),
538 UNIPHIER_PINCTRL_GROUP(nand_cs1), 540 UNIPHIER_PINCTRL_GROUP(nand_cs1),
541 UNIPHIER_PINCTRL_GROUP(sd),
539 UNIPHIER_PINCTRL_GROUP(uart0), 542 UNIPHIER_PINCTRL_GROUP(uart0),
540 UNIPHIER_PINCTRL_GROUP(uart1), 543 UNIPHIER_PINCTRL_GROUP(uart1),
541 UNIPHIER_PINCTRL_GROUP(uart2), 544 UNIPHIER_PINCTRL_GROUP(uart2),
@@ -684,6 +687,7 @@ static const char * const i2c1_groups[] = {"i2c1"};
684static const char * const i2c2_groups[] = {"i2c2"}; 687static const char * const i2c2_groups[] = {"i2c2"};
685static const char * const i2c3_groups[] = {"i2c3"}; 688static const char * const i2c3_groups[] = {"i2c3"};
686static const char * const nand_groups[] = {"nand", "nand_cs1"}; 689static const char * const nand_groups[] = {"nand", "nand_cs1"};
690static const char * const sd_groups[] = {"sd"};
687static const char * const uart0_groups[] = {"uart0"}; 691static const char * const uart0_groups[] = {"uart0"};
688static const char * const uart1_groups[] = {"uart1"}; 692static const char * const uart1_groups[] = {"uart1"};
689static const char * const uart2_groups[] = {"uart2"}; 693static const char * const uart2_groups[] = {"uart2"};
@@ -739,6 +743,7 @@ static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
739 UNIPHIER_PINMUX_FUNCTION(i2c2), 743 UNIPHIER_PINMUX_FUNCTION(i2c2),
740 UNIPHIER_PINMUX_FUNCTION(i2c3), 744 UNIPHIER_PINMUX_FUNCTION(i2c3),
741 UNIPHIER_PINMUX_FUNCTION(nand), 745 UNIPHIER_PINMUX_FUNCTION(nand),
746 UNIPHIER_PINMUX_FUNCTION(sd),
742 UNIPHIER_PINMUX_FUNCTION(uart0), 747 UNIPHIER_PINMUX_FUNCTION(uart0),
743 UNIPHIER_PINMUX_FUNCTION(uart1), 748 UNIPHIER_PINMUX_FUNCTION(uart1),
744 UNIPHIER_PINMUX_FUNCTION(uart2), 749 UNIPHIER_PINMUX_FUNCTION(uart2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
index 3f036e236ad9..bc00d7591c59 100644
--- a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
@@ -751,6 +751,8 @@ static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
751 8, 8}; 751 8, 8};
752static const unsigned nand_cs1_pins[] = {37, 38}; 752static const unsigned nand_cs1_pins[] = {37, 38};
753static const unsigned nand_cs1_muxvals[] = {8, 8}; 753static const unsigned nand_cs1_muxvals[] = {8, 8};
754static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
755static const unsigned sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8};
754static const unsigned uart0_pins[] = {217, 218}; 756static const unsigned uart0_pins[] = {217, 218};
755static const unsigned uart0_muxvals[] = {8, 8}; 757static const unsigned uart0_muxvals[] = {8, 8};
756static const unsigned uart0b_pins[] = {179, 180}; 758static const unsigned uart0b_pins[] = {179, 180};
@@ -857,6 +859,7 @@ static const struct uniphier_pinctrl_group proxstream2_groups[] = {
857 UNIPHIER_PINCTRL_GROUP(i2c6), 859 UNIPHIER_PINCTRL_GROUP(i2c6),
858 UNIPHIER_PINCTRL_GROUP(nand), 860 UNIPHIER_PINCTRL_GROUP(nand),
859 UNIPHIER_PINCTRL_GROUP(nand_cs1), 861 UNIPHIER_PINCTRL_GROUP(nand_cs1),
862 UNIPHIER_PINCTRL_GROUP(sd),
860 UNIPHIER_PINCTRL_GROUP(uart0), 863 UNIPHIER_PINCTRL_GROUP(uart0),
861 UNIPHIER_PINCTRL_GROUP(uart0b), 864 UNIPHIER_PINCTRL_GROUP(uart0b),
862 UNIPHIER_PINCTRL_GROUP(uart1), 865 UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1128,6 +1131,7 @@ static const char * const i2c3_groups[] = {"i2c3"};
1128static const char * const i2c5_groups[] = {"i2c5"}; 1131static const char * const i2c5_groups[] = {"i2c5"};
1129static const char * const i2c6_groups[] = {"i2c6"}; 1132static const char * const i2c6_groups[] = {"i2c6"};
1130static const char * const nand_groups[] = {"nand", "nand_cs1"}; 1133static const char * const nand_groups[] = {"nand", "nand_cs1"};
1134static const char * const sd_groups[] = {"sd"};
1131static const char * const uart0_groups[] = {"uart0", "uart0b"}; 1135static const char * const uart0_groups[] = {"uart0", "uart0b"};
1132static const char * const uart1_groups[] = {"uart1"}; 1136static const char * const uart1_groups[] = {"uart1"};
1133static const char * const uart2_groups[] = {"uart2"}; 1137static const char * const uart2_groups[] = {"uart2"};
@@ -1213,6 +1217,7 @@ static const struct uniphier_pinmux_function proxstream2_functions[] = {
1213 UNIPHIER_PINMUX_FUNCTION(i2c5), 1217 UNIPHIER_PINMUX_FUNCTION(i2c5),
1214 UNIPHIER_PINMUX_FUNCTION(i2c6), 1218 UNIPHIER_PINMUX_FUNCTION(i2c6),
1215 UNIPHIER_PINMUX_FUNCTION(nand), 1219 UNIPHIER_PINMUX_FUNCTION(nand),
1220 UNIPHIER_PINMUX_FUNCTION(sd),
1216 UNIPHIER_PINMUX_FUNCTION(uart0), 1221 UNIPHIER_PINMUX_FUNCTION(uart0),
1217 UNIPHIER_PINMUX_FUNCTION(uart1), 1222 UNIPHIER_PINMUX_FUNCTION(uart1),
1218 UNIPHIER_PINMUX_FUNCTION(uart2), 1223 UNIPHIER_PINMUX_FUNCTION(uart2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 918f3b643f1b..589872cc8adb 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -539,6 +539,12 @@ static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
539 unsigned reg, reg_end, shift, mask; 539 unsigned reg, reg_end, shift, mask;
540 int ret; 540 int ret;
541 541
542 /* some pins need input-enabling */
543 ret = uniphier_conf_pin_input_enable(pctldev,
544 &pctldev->desc->pins[pin], 1);
545 if (ret)
546 return ret;
547
542 reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride; 548 reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
543 reg_end = reg + reg_stride; 549 reg_end = reg + reg_stride;
544 shift = pin * mux_bits % 32; 550 shift = pin * mux_bits % 32;
@@ -563,9 +569,7 @@ static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
563 return ret; 569 return ret;
564 } 570 }
565 571
566 /* some pins need input-enabling */ 572 return 0;
567 return uniphier_conf_pin_input_enable(pctldev,
568 &pctldev->desc->pins[pin], 1);
569} 573}
570 574
571static int uniphier_pmx_set_mux(struct pinctrl_dev *pctldev, 575static int uniphier_pmx_set_mux(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index c15316b003c5..fb22d3f62480 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -486,16 +486,6 @@ static struct pinctrl_desc wmt_desc = {
486 .confops = &wmt_pinconf_ops, 486 .confops = &wmt_pinconf_ops,
487}; 487};
488 488
489static int wmt_gpio_request(struct gpio_chip *chip, unsigned offset)
490{
491 return pinctrl_request_gpio(chip->base + offset);
492}
493
494static void wmt_gpio_free(struct gpio_chip *chip, unsigned offset)
495{
496 pinctrl_free_gpio(chip->base + offset);
497}
498
499static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset) 489static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
500{ 490{
501 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); 491 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -560,8 +550,8 @@ static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
560static struct gpio_chip wmt_gpio_chip = { 550static struct gpio_chip wmt_gpio_chip = {
561 .label = "gpio-wmt", 551 .label = "gpio-wmt",
562 .owner = THIS_MODULE, 552 .owner = THIS_MODULE,
563 .request = wmt_gpio_request, 553 .request = gpiochip_generic_request,
564 .free = wmt_gpio_free, 554 .free = gpiochip_generic_free,
565 .get_direction = wmt_gpio_get_direction, 555 .get_direction = wmt_gpio_get_direction,
566 .direction_input = wmt_gpio_direction_input, 556 .direction_input = wmt_gpio_direction_input,
567 .direction_output = wmt_gpio_direction_output, 557 .direction_output = wmt_gpio_direction_output,
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cdad4d95b20e..805c749ac1ad 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -179,8 +179,8 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
179 /* check event type */ 179 /* check event type */
180 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); 180 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
181 181
182 dev_dbg(pps->dev, "PPS event at %ld.%09ld\n", 182 dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
183 ts->ts_real.tv_sec, ts->ts_real.tv_nsec); 183 (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
184 184
185 timespec_to_pps_ktime(&ts_real, ts->ts_real); 185 timespec_to_pps_ktime(&ts_real, ts->ts_real);
186 186
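
The cast added in the hunk above is the portable way to print a seconds field whose width differs between 32-bit and 64-bit configurations. The same pattern in plain userspace C (a standalone illustration; the names are not from the kernel):

#include <stdio.h>
#include <time.h>

/* tv_sec may be 32 or 64 bits wide, so cast it to a known-widest type and
 * use the matching format specifier, as the (s64)/%lld cast does above.
 */
static void print_timestamp(const struct timespec *ts)
{
	printf("PPS-style event at %lld.%09ld\n",
	       (long long)ts->tv_sec, ts->tv_nsec);
}

int main(void)
{
	struct timespec ts = { .tv_sec = 1446428400, .tv_nsec = 123456789 };

	print_timestamp(&ts);
	return 0;
}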
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 454536c49315..9c780740fb82 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -887,6 +887,8 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
887static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, 887static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
888 struct mvs_slot_info *slot, u32 slot_idx) 888 struct mvs_slot_info *slot, u32 slot_idx)
889{ 889{
890 if (!slot)
891 return;
890 if (!slot->task) 892 if (!slot->task)
891 return; 893 return;
892 if (!sas_protocol_ata(task->task_proto)) 894 if (!sas_protocol_ata(task->task_proto))
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 0a2168e69bbc..e7649ed3f667 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -226,16 +226,20 @@ int scsi_dh_add_device(struct scsi_device *sdev)
226 226
227 drv = scsi_dh_find_driver(sdev); 227 drv = scsi_dh_find_driver(sdev);
228 if (drv) 228 if (drv)
229 devinfo = scsi_dh_lookup(drv); 229 devinfo = __scsi_dh_lookup(drv);
230 if (devinfo) 230 if (devinfo)
231 err = scsi_dh_handler_attach(sdev, devinfo); 231 err = scsi_dh_handler_attach(sdev, devinfo);
232 return err; 232 return err;
233} 233}
234 234
235void scsi_dh_remove_device(struct scsi_device *sdev) 235void scsi_dh_release_device(struct scsi_device *sdev)
236{ 236{
237 if (sdev->handler) 237 if (sdev->handler)
238 scsi_dh_handler_detach(sdev); 238 scsi_dh_handler_detach(sdev);
239}
240
241void scsi_dh_remove_device(struct scsi_device *sdev)
242{
239 device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr); 243 device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr);
240} 244}
241 245
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 644bb7339b55..4d01cdb1b348 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -173,9 +173,11 @@ extern struct async_domain scsi_sd_probe_domain;
173/* scsi_dh.c */ 173/* scsi_dh.c */
174#ifdef CONFIG_SCSI_DH 174#ifdef CONFIG_SCSI_DH
175int scsi_dh_add_device(struct scsi_device *sdev); 175int scsi_dh_add_device(struct scsi_device *sdev);
176void scsi_dh_release_device(struct scsi_device *sdev);
176void scsi_dh_remove_device(struct scsi_device *sdev); 177void scsi_dh_remove_device(struct scsi_device *sdev);
177#else 178#else
178static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; } 179static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
180static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
179static inline void scsi_dh_remove_device(struct scsi_device *sdev) { } 181static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
180#endif 182#endif
181 183
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b333389f248f..dff8fafb741c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -399,6 +399,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
399 399
400 sdev = container_of(work, struct scsi_device, ew.work); 400 sdev = container_of(work, struct scsi_device, ew.work);
401 401
402 scsi_dh_release_device(sdev);
403
402 parent = sdev->sdev_gendev.parent; 404 parent = sdev->sdev_gendev.parent;
403 405
404 spin_lock_irqsave(sdev->host->host_lock, flags); 406 spin_lock_irqsave(sdev->host->host_lock, flags);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 4a3cf9ba152f..fb36810ae89a 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -657,7 +657,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
657 "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n", 657 "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
658 intspec[0], intspec[1], intspec[2]); 658 intspec[0], intspec[1], intspec[2]);
659 659
660 if (d->of_node != controller) 660 if (irq_domain_get_of_node(d) != controller)
661 return -EINVAL; 661 return -EINVAL;
662 if (intsize != 4) 662 if (intsize != 4)
663 return -EINVAL; 663 return -EINVAL;
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 23685e74917e..bd2c69f85949 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -116,7 +116,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
116 if (ret) 116 if (ret)
117 goto error_ret; 117 goto error_ret;
118 118
119 for (i = 0; i < num_read; i++) 119 for (i = 0; i < num_read / sizeof(u16); i++)
120 *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i); 120 *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
121 121
122 if (copy_to_user(buf, rx, num_read)) 122 if (copy_to_user(buf, rx, num_read))
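
The loop bound fixed above was a byte count being used to walk 16-bit samples; dividing by the element size gives the number of conversions. A standalone illustration of the corrected conversion (not driver code; load_be16() stands in for be16_to_cpup()):

#include <stdint.h>
#include <stdio.h>

static uint16_t load_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	uint8_t rx[] = { 0x12, 0x34, 0xab, 0xcd };	/* two big-endian samples */
	size_t num_read = sizeof(rx);			/* a BYTE count */
	uint16_t samples[sizeof(rx) / sizeof(uint16_t)];
	size_t i;

	/* iterate num_read / sizeof(u16) times, not num_read times */
	for (i = 0; i < num_read / sizeof(uint16_t); i++)
		samples[i] = load_be16(rx + 2 * i);

	printf("0x%04x 0x%04x\n", samples[0], samples[1]);
	return 0;
}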
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 3f7715c9968b..47fc00a3f63b 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -915,11 +915,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
915 case IIO_CHAN_INFO_OFFSET: 915 case IIO_CHAN_INFO_OFFSET:
916 if (chan->type == IIO_TEMP) { 916 if (chan->type == IIO_TEMP) {
917 /* The calculated value from the ADC is in Kelvin, we 917 /* The calculated value from the ADC is in Kelvin, we
918 * want Celsius for hwmon so the offset is 918 * want Celsius for hwmon so the offset is -273.15
919 * -272.15 * scale 919 * The offset is applied before scaling so it is
920 * actually -273.15 * 4 / 1.012 = -1079.644268
920 */ 921 */
921 *val = -1075; 922 *val = -1079;
922 *val2 = 691699; 923 *val2 = 644268;
923 924
924 return IIO_VAL_INT_PLUS_MICRO; 925 return IIO_VAL_INT_PLUS_MICRO;
925 } 926 }
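
The replacement offset above comes from folding the Kelvin-to-Celsius shift through the channel scale quoted in the comment (4 / 1.012). A quick standalone check of that arithmetic, reproducing the val/val2 pair used by the hunk:

#include <stdio.h>

int main(void)
{
	/* -273.15 degC shift, applied before the 4 / 1.012 channel scale */
	double offset = -273.15 * 4.0 / 1.012;		/* ~ -1079.644269 */
	int val = (int)offset;				/* -1079 */
	int val2 = (int)((offset - val) * -1000000.0);	/* 644268 */

	printf("offset = %.6f -> { %d, %d }\n", offset, val, val2);
	return 0;
}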
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 98af3b1f2d2a..aa5ab6c80ed4 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -7,7 +7,7 @@
7#include <linux/workqueue.h> 7#include <linux/workqueue.h>
8#include <linux/tty.h> 8#include <linux/tty.h>
9#include <linux/tty_flip.h> 9#include <linux/tty_flip.h>
10#include <asm/cmpxchg.h> 10#include <linux/atomic.h>
11 11
12#include "speakup.h" 12#include "speakup.h"
13 13
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 0bae8cc6c23a..ca920b0ecf8f 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -932,7 +932,7 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
932 932
933 if (data->soc == SOC_ARCH_EXYNOS5260) 933 if (data->soc == SOC_ARCH_EXYNOS5260)
934 emul_con = EXYNOS5260_EMUL_CON; 934 emul_con = EXYNOS5260_EMUL_CON;
935 if (data->soc == SOC_ARCH_EXYNOS5433) 935 else if (data->soc == SOC_ARCH_EXYNOS5433)
936 emul_con = EXYNOS5433_TMU_EMUL_CON; 936 emul_con = EXYNOS5433_TMU_EMUL_CON;
937 else if (data->soc == SOC_ARCH_EXYNOS7) 937 else if (data->soc == SOC_ARCH_EXYNOS7)
938 emul_con = EXYNOS7_TMU_REG_EMUL_CON; 938 emul_con = EXYNOS7_TMU_REG_EMUL_CON;
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 21d01a491405..e508939daea3 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -80,10 +80,6 @@ int serial8250_tx_dma(struct uart_8250_port *p)
80 return 0; 80 return 0;
81 81
82 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 82 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
83 if (dma->tx_size < p->port.fifosize) {
84 ret = -EINVAL;
85 goto err;
86 }
87 83
88 desc = dmaengine_prep_slave_single(dma->txchan, 84 desc = dmaengine_prep_slave_single(dma->txchan,
89 dma->tx_addr + xmit->tail, 85 dma->tx_addr + xmit->tail,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c79d33676672..c47d3e480586 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -147,6 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
147 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 147 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
148 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 148 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
149 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 149 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
150 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
150 } 151 }
151 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 152 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
152 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || 153 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 43291f93afeb..97ffe3997273 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2191,6 +2191,10 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2191 } 2191 }
2192 /* Fast path - was this the last TRB in the TD for this URB? */ 2192 /* Fast path - was this the last TRB in the TD for this URB? */
2193 } else if (event_trb == td->last_trb) { 2193 } else if (event_trb == td->last_trb) {
2194 if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
2195 return finish_td(xhci, td, event_trb, event, ep,
2196 status, false);
2197
2194 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2198 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2195 td->urb->actual_length = 2199 td->urb->actual_length =
2196 td->urb->transfer_buffer_length - 2200 td->urb->transfer_buffer_length -
@@ -2242,6 +2246,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2242 td->urb->actual_length += 2246 td->urb->actual_length +=
2243 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2247 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2244 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2248 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2249
2250 if (trb_comp_code == COMP_SHORT_TX) {
2251 xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
2252 td->urb_length_set = true;
2253 return 0;
2254 }
2245 } 2255 }
2246 2256
2247 return finish_td(xhci, td, event_trb, event, ep, status, false); 2257 return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2274,6 +2284,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2274 u32 trb_comp_code; 2284 u32 trb_comp_code;
2275 int ret = 0; 2285 int ret = 0;
2276 int td_num = 0; 2286 int td_num = 0;
2287 bool handling_skipped_tds = false;
2277 2288
2278 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2289 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2279 xdev = xhci->devs[slot_id]; 2290 xdev = xhci->devs[slot_id];
@@ -2410,6 +2421,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2410 ep->skip = true; 2421 ep->skip = true;
2411 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 2422 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2412 goto cleanup; 2423 goto cleanup;
2424 case COMP_PING_ERR:
2425 ep->skip = true;
2426 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
2427 goto cleanup;
2413 default: 2428 default:
2414 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 2429 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2415 status = 0; 2430 status = 0;
@@ -2546,13 +2561,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2546 ep, &status); 2561 ep, &status);
2547 2562
2548cleanup: 2563cleanup:
2564
2565
2566 handling_skipped_tds = ep->skip &&
2567 trb_comp_code != COMP_MISSED_INT &&
2568 trb_comp_code != COMP_PING_ERR;
2569
2549 /* 2570 /*
2550 * Do not update event ring dequeue pointer if ep->skip is set. 2571 * Do not update event ring dequeue pointer if we're in a loop
2551 * Will roll back to continue process missed tds. 2572 * processing missed tds.
2552 */ 2573 */
2553 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 2574 if (!handling_skipped_tds)
2554 inc_deq(xhci, xhci->event_ring); 2575 inc_deq(xhci, xhci->event_ring);
2555 }
2556 2576
2557 if (ret) { 2577 if (ret) {
2558 urb = td->urb; 2578 urb = td->urb;
@@ -2587,7 +2607,7 @@ cleanup:
2587 * Process them as short transfer until reach the td pointed by 2607 * Process them as short transfer until reach the td pointed by
2588 * the event. 2608 * the event.
2589 */ 2609 */
2590 } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2610 } while (handling_skipped_tds);
2591 2611
2592 return 0; 2612 return 0;
2593} 2613}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 70f2b8a2e6cf..1bd9232ff76f 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -391,9 +391,20 @@ static int omap2430_musb_init(struct musb *musb)
391 } 391 }
392 musb->isr = omap2430_musb_interrupt; 392 musb->isr = omap2430_musb_interrupt;
393 393
394 /*
395 * Enable runtime PM for musb parent (this driver). We can't
396 * do it earlier as struct musb is not yet allocated and we
397 * need to touch the musb registers for runtime PM.
398 */
399 pm_runtime_enable(glue->dev);
400 status = pm_runtime_get_sync(glue->dev);
401 if (status < 0)
402 goto err1;
403
394 status = pm_runtime_get_sync(dev); 404 status = pm_runtime_get_sync(dev);
395 if (status < 0) { 405 if (status < 0) {
396 dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); 406 dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
407 pm_runtime_put_sync(glue->dev);
397 goto err1; 408 goto err1;
398 } 409 }
399 410
@@ -426,6 +437,7 @@ static int omap2430_musb_init(struct musb *musb)
426 phy_power_on(musb->phy); 437 phy_power_on(musb->phy);
427 438
428 pm_runtime_put_noidle(musb->controller); 439 pm_runtime_put_noidle(musb->controller);
440 pm_runtime_put_noidle(glue->dev);
429 return 0; 441 return 0;
430 442
431err1: 443err1:
@@ -626,7 +638,11 @@ static int omap2430_probe(struct platform_device *pdev)
626 goto err2; 638 goto err2;
627 } 639 }
628 640
629 pm_runtime_enable(&pdev->dev); 641 /*
642 * Note that we cannot enable PM runtime yet for this
643 * driver as we need struct musb initialized first.
644 * See omap2430_musb_init above.
645 */
630 646
631 ret = platform_device_add(musb); 647 ret = platform_device_add(musb);
632 if (ret) { 648 if (ret) {
@@ -675,11 +691,12 @@ static int omap2430_runtime_resume(struct device *dev)
675 struct omap2430_glue *glue = dev_get_drvdata(dev); 691 struct omap2430_glue *glue = dev_get_drvdata(dev);
676 struct musb *musb = glue_to_musb(glue); 692 struct musb *musb = glue_to_musb(glue);
677 693
678 if (musb) { 694 if (!musb)
679 omap2430_low_level_init(musb); 695 return -EPROBE_DEFER;
680 musb_writel(musb->mregs, OTG_INTERFSEL, 696
681 musb->context.otg_interfsel); 697 omap2430_low_level_init(musb);
682 } 698 musb_writel(musb->mregs, OTG_INTERFSEL,
699 musb->context.otg_interfsel);
683 700
684 return 0; 701 return 0;
685} 702}
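
The omap2430 change defers pm_runtime_enable() for the glue device until omap2430_musb_init(), since the runtime PM callbacks need an allocated struct musb before they can touch the registers; the error path balances the new get with a put, probe no longer enables runtime PM itself, and runtime_resume returns -EPROBE_DEFER until musb exists. A kernel-style sketch of the resulting get/put ordering (not a drop-in; the device pointers are abbreviated stand-ins for glue->dev and the musb controller device):

#include <linux/pm_runtime.h>

static int glue_musb_init_sketch(struct device *glue_dev, struct device *musb_dev)
{
        int ret;

        pm_runtime_enable(glue_dev);            /* now safe: struct musb exists */

        ret = pm_runtime_get_sync(glue_dev);
        if (ret < 0)
                return ret;

        ret = pm_runtime_get_sync(musb_dev);
        if (ret < 0) {
                pm_runtime_put_sync(glue_dev);  /* balance the glue get on error */
                return ret;
        }

        /* ... hardware init that touches the musb registers ... */

        pm_runtime_put_noidle(musb_dev);
        pm_runtime_put_noidle(glue_dev);
        return 0;
}
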
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index 8fc15c0ba339..277160bc6f25 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -13,7 +13,6 @@
13#include <linux/gpio.h> 13#include <linux/gpio.h>
14#include <linux/of_gpio.h> 14#include <linux/of_gpio.h>
15#include <linux/phy/phy.h> 15#include <linux/phy/phy.h>
16#include <linux/platform_data/gpio-rcar.h>
17#include <linux/usb/phy.h> 16#include <linux/usb/phy.h>
18#include "common.h" 17#include "common.h"
19#include "rcar2.h" 18#include "rcar2.h"
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 4772862b71a7..d3f767448a72 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -183,10 +183,17 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
183 return vq->acked_features & (1ULL << bit); 183 return vq->acked_features & (1ULL << bit);
184} 184}
185 185
186#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
186static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq) 187static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
187{ 188{
188 return vq->is_le; 189 return vq->is_le;
189} 190}
191#else
192static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
193{
194 return virtio_legacy_is_little_endian() || vq->is_le;
195}
196#endif
190 197
191/* Memory accessors */ 198/* Memory accessors */
192static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val) 199static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
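
The vhost change gives vhost_is_little_endian() two flavours: with CONFIG_VHOST_CROSS_ENDIAN_LEGACY the per-virtqueue is_le flag alone decides byte order, while without it legacy devices are assumed to follow the host's native order, so the helper short-circuits on virtio_legacy_is_little_endian(). A userspace model of that decision (function and parameter names invented; only the logic mirrors the patch):

#include <stdbool.h>
#include <stdio.h>
#include <endian.h>

static bool legacy_is_little_endian(void)
{
        return __BYTE_ORDER == __LITTLE_ENDIAN; /* host-native order for legacy devices */
}

static bool vq_is_little_endian(bool vq_is_le, bool cross_endian_legacy)
{
        if (cross_endian_legacy)
                return vq_is_le;                        /* flag tracks the negotiated order */
        return legacy_is_little_endian() || vq_is_le;   /* no cross-endian support built in */
}

int main(void)
{
        printf("legacy device on this host:  %d\n", vq_is_little_endian(false, false));
        printf("modern (VERSION_1) device:   %d\n", vq_is_little_endian(true, false));
        return 0;
}
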
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 1aaf89300621..92f394927f24 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1093,6 +1093,7 @@ static void fbcon_init(struct vc_data *vc, int init)
1093 con_copy_unimap(vc, svc); 1093 con_copy_unimap(vc, svc);
1094 1094
1095 ops = info->fbcon_par; 1095 ops = info->fbcon_par;
1096 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1096 p->con_rotate = initial_rotation; 1097 p->con_rotate = initial_rotation;
1097 set_blitting_type(vc, info); 1098 set_blitting_type(vc, info);
1098 1099
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 4bfff349b1fb..95d293b7445a 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -114,6 +114,20 @@ static int efifb_setup(char *options)
114 return 0; 114 return 0;
115} 115}
116 116
117static inline bool fb_base_is_valid(void)
118{
119 if (screen_info.lfb_base)
120 return true;
121
122 if (!(screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE))
123 return false;
124
125 if (screen_info.ext_lfb_base)
126 return true;
127
128 return false;
129}
130
117static int efifb_probe(struct platform_device *dev) 131static int efifb_probe(struct platform_device *dev)
118{ 132{
119 struct fb_info *info; 133 struct fb_info *info;
@@ -141,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
141 screen_info.lfb_depth = 32; 155 screen_info.lfb_depth = 32;
142 if (!screen_info.pages) 156 if (!screen_info.pages)
143 screen_info.pages = 1; 157 screen_info.pages = 1;
144 if (!screen_info.lfb_base) { 158 if (!fb_base_is_valid()) {
145 printk(KERN_DEBUG "efifb: invalid framebuffer address\n"); 159 printk(KERN_DEBUG "efifb: invalid framebuffer address\n");
146 return -ENODEV; 160 return -ENODEV;
147 } 161 }
@@ -160,6 +174,14 @@ static int efifb_probe(struct platform_device *dev)
160 } 174 }
161 175
162 efifb_fix.smem_start = screen_info.lfb_base; 176 efifb_fix.smem_start = screen_info.lfb_base;
177
178 if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) {
179 u64 ext_lfb_base;
180
181 ext_lfb_base = (u64)(unsigned long)screen_info.ext_lfb_base << 32;
182 efifb_fix.smem_start |= ext_lfb_base;
183 }
184
163 efifb_defined.bits_per_pixel = screen_info.lfb_depth; 185 efifb_defined.bits_per_pixel = screen_info.lfb_depth;
164 efifb_defined.xres = screen_info.lfb_width; 186 efifb_defined.xres = screen_info.lfb_width;
165 efifb_defined.yres = screen_info.lfb_height; 187 efifb_defined.yres = screen_info.lfb_height;
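
efifb now accepts firmware that reports VIDEO_CAPABILITY_64BIT_BASE, where the framebuffer address is split between lfb_base (low 32 bits) and ext_lfb_base (high 32 bits); fb_base_is_valid() treats either half being non-zero as valid, and the probe path reassembles the full address. A standalone sketch of the reassembly (the capability value is assumed here to be bit 1, as in the UAPI screen_info definitions):

#include <stdint.h>
#include <stdio.h>

#define VIDEO_CAPABILITY_64BIT_BASE (1 << 1)    /* assumed value, for illustration */

/* Recombine the split EFI framebuffer base the way the probe path above does. */
static uint64_t efi_fb_base(uint32_t lfb_base, uint32_t ext_lfb_base, uint32_t capabilities)
{
        uint64_t base = lfb_base;

        if (capabilities & VIDEO_CAPABILITY_64BIT_BASE)
                base |= (uint64_t)ext_lfb_base << 32;   /* high 32 bits live in ext_lfb_base */

        return base;
}

int main(void)
{
        printf("0x%llx\n", (unsigned long long)
               efi_fb_base(0x80000000u, 0x1u, VIDEO_CAPABILITY_64BIT_BASE)); /* 0x180000000 */
        return 0;
}
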
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index b823fac91c92..8c6f247ba81d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2584,7 +2584,7 @@ static long btrfs_fallocate(struct file *file, int mode,
2584 alloc_start); 2584 alloc_start);
2585 if (ret) 2585 if (ret)
2586 goto out; 2586 goto out;
2587 } else { 2587 } else if (offset + len > inode->i_size) {
2588 /* 2588 /*
2589 * If we are fallocating from the end of the file onward we 2589 * If we are fallocating from the end of the file onward we
2590 * need to zero out the end of the page if i_size lands in the 2590 * need to zero out the end of the page if i_size lands in the
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3e3e6130637f..8d20f3b1cab0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4641,7 +4641,7 @@ locked:
4641 4641
4642 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { 4642 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4643 ret = -EINVAL; 4643 ret = -EINVAL;
4644 goto out_bargs; 4644 goto out_bctl;
4645 } 4645 }
4646 4646
4647do_balance: 4647do_balance:
@@ -4655,12 +4655,15 @@ do_balance:
4655 need_unlock = false; 4655 need_unlock = false;
4656 4656
4657 ret = btrfs_balance(bctl, bargs); 4657 ret = btrfs_balance(bctl, bargs);
4658 bctl = NULL;
4658 4659
4659 if (arg) { 4660 if (arg) {
4660 if (copy_to_user(arg, bargs, sizeof(*bargs))) 4661 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4661 ret = -EFAULT; 4662 ret = -EFAULT;
4662 } 4663 }
4663 4664
4665out_bctl:
4666 kfree(bctl);
4664out_bargs: 4667out_bargs:
4665 kfree(bargs); 4668 kfree(bargs);
4666out_unlock: 4669out_unlock:
diff --git a/fs/file.c b/fs/file.c
index 6c672ad329e9..c6986dce0334 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -56,6 +56,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
56 __free_fdtable(container_of(rcu, struct fdtable, rcu)); 56 __free_fdtable(container_of(rcu, struct fdtable, rcu));
57} 57}
58 58
59#define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
60#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
61
59/* 62/*
60 * Expand the fdset in the files_struct. Called with the files spinlock 63 * Expand the fdset in the files_struct. Called with the files spinlock
61 * held for write. 64 * held for write.
@@ -77,6 +80,11 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
77 memset((char *)(nfdt->open_fds) + cpy, 0, set); 80 memset((char *)(nfdt->open_fds) + cpy, 0, set);
78 memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy); 81 memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
79 memset((char *)(nfdt->close_on_exec) + cpy, 0, set); 82 memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
83
84 cpy = BITBIT_SIZE(ofdt->max_fds);
85 set = BITBIT_SIZE(nfdt->max_fds) - cpy;
86 memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
87 memset(cpy+(char *)nfdt->full_fds_bits, 0, set);
80} 88}
81 89
82static struct fdtable * alloc_fdtable(unsigned int nr) 90static struct fdtable * alloc_fdtable(unsigned int nr)
@@ -115,12 +123,14 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
115 fdt->fd = data; 123 fdt->fd = data;
116 124
117 data = alloc_fdmem(max_t(size_t, 125 data = alloc_fdmem(max_t(size_t,
118 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES)); 126 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
119 if (!data) 127 if (!data)
120 goto out_arr; 128 goto out_arr;
121 fdt->open_fds = data; 129 fdt->open_fds = data;
122 data += nr / BITS_PER_BYTE; 130 data += nr / BITS_PER_BYTE;
123 fdt->close_on_exec = data; 131 fdt->close_on_exec = data;
132 data += nr / BITS_PER_BYTE;
133 fdt->full_fds_bits = data;
124 134
125 return fdt; 135 return fdt;
126 136
@@ -226,17 +236,22 @@ static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
226 236
227static inline void __clear_close_on_exec(int fd, struct fdtable *fdt) 237static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
228{ 238{
229 __clear_bit(fd, fdt->close_on_exec); 239 if (test_bit(fd, fdt->close_on_exec))
240 __clear_bit(fd, fdt->close_on_exec);
230} 241}
231 242
232static inline void __set_open_fd(int fd, struct fdtable *fdt) 243static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
233{ 244{
234 __set_bit(fd, fdt->open_fds); 245 __set_bit(fd, fdt->open_fds);
246 fd /= BITS_PER_LONG;
247 if (!~fdt->open_fds[fd])
248 __set_bit(fd, fdt->full_fds_bits);
235} 249}
236 250
237static inline void __clear_open_fd(int fd, struct fdtable *fdt) 251static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
238{ 252{
239 __clear_bit(fd, fdt->open_fds); 253 __clear_bit(fd, fdt->open_fds);
254 __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
240} 255}
241 256
242static int count_open_files(struct fdtable *fdt) 257static int count_open_files(struct fdtable *fdt)
@@ -280,6 +295,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
280 new_fdt->max_fds = NR_OPEN_DEFAULT; 295 new_fdt->max_fds = NR_OPEN_DEFAULT;
281 new_fdt->close_on_exec = newf->close_on_exec_init; 296 new_fdt->close_on_exec = newf->close_on_exec_init;
282 new_fdt->open_fds = newf->open_fds_init; 297 new_fdt->open_fds = newf->open_fds_init;
298 new_fdt->full_fds_bits = newf->full_fds_bits_init;
283 new_fdt->fd = &newf->fd_array[0]; 299 new_fdt->fd = &newf->fd_array[0];
284 300
285 spin_lock(&oldf->file_lock); 301 spin_lock(&oldf->file_lock);
@@ -323,6 +339,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
323 339
324 memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8); 340 memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
325 memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8); 341 memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
342 memcpy(new_fdt->full_fds_bits, old_fdt->full_fds_bits, BITBIT_SIZE(open_files));
326 343
327 for (i = open_files; i != 0; i--) { 344 for (i = open_files; i != 0; i--) {
328 struct file *f = *old_fds++; 345 struct file *f = *old_fds++;
@@ -454,10 +471,25 @@ struct files_struct init_files = {
454 .fd = &init_files.fd_array[0], 471 .fd = &init_files.fd_array[0],
455 .close_on_exec = init_files.close_on_exec_init, 472 .close_on_exec = init_files.close_on_exec_init,
456 .open_fds = init_files.open_fds_init, 473 .open_fds = init_files.open_fds_init,
474 .full_fds_bits = init_files.full_fds_bits_init,
457 }, 475 },
458 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), 476 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
459}; 477};
460 478
479static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
480{
481 unsigned long maxfd = fdt->max_fds;
482 unsigned long maxbit = maxfd / BITS_PER_LONG;
483 unsigned long bitbit = start / BITS_PER_LONG;
484
485 bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
486 if (bitbit > maxfd)
487 return maxfd;
488 if (bitbit > start)
489 start = bitbit;
490 return find_next_zero_bit(fdt->open_fds, maxfd, start);
491}
492
461/* 493/*
462 * allocate a file descriptor, mark it busy. 494 * allocate a file descriptor, mark it busy.
463 */ 495 */
@@ -476,7 +508,7 @@ repeat:
476 fd = files->next_fd; 508 fd = files->next_fd;
477 509
478 if (fd < fdt->max_fds) 510 if (fd < fdt->max_fds)
479 fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd); 511 fd = find_next_fd(fdt, fd);
480 512
481 /* 513 /*
482 * N.B. For clone tasks sharing a files structure, this test 514 * N.B. For clone tasks sharing a files structure, this test
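
The fs/file.c hunks add full_fds_bits, a second-level bitmap with one bit per BITS_PER_LONG-sized word of open_fds that is set when the word is completely occupied, so fd allocation can jump over saturated words instead of scanning them bit by bit (the real find_next_fd() also honours files->next_fd as a start hint, which this model omits). A simplified standalone model of the two-level search, with a fixed-size table and invented helper names:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG   64
#define MAX_FDS         256
#define NWORDS          (MAX_FDS / BITS_PER_LONG)

static uint64_t open_fds[NWORDS];       /* one bit per fd            */
static uint64_t full_fds_bits;          /* one bit per open_fds word */

static void mark_open(unsigned int fd)
{
        open_fds[fd / BITS_PER_LONG] |= 1ULL << (fd % BITS_PER_LONG);
        if (open_fds[fd / BITS_PER_LONG] == ~0ULL)      /* word saturated */
                full_fds_bits |= 1ULL << (fd / BITS_PER_LONG);
}

static unsigned int next_free_fd(void)
{
        unsigned int word, bit;

        /* Level 1: find the first word that is not completely full. */
        for (word = 0; word < NWORDS; word++)
                if (!(full_fds_bits & (1ULL << word)))
                        break;
        if (word == NWORDS)
                return MAX_FDS;                         /* table exhausted */

        /* Level 2: scan only inside that word. */
        for (bit = 0; bit < BITS_PER_LONG; bit++)
                if (!(open_fds[word] & (1ULL << bit)))
                        return word * BITS_PER_LONG + bit;

        return MAX_FDS;         /* unreachable: word was not marked full */
}

int main(void)
{
        unsigned int fd;

        for (fd = 0; fd < 100; fd++)    /* occupy fds 0..99 */
                mark_open(fd);
        printf("next free fd: %u\n", next_free_fd());   /* 100 */
        return 0;
}
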
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 091a36444972..7378169e90be 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -778,19 +778,24 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
778 struct wb_writeback_work *base_work, 778 struct wb_writeback_work *base_work,
779 bool skip_if_busy) 779 bool skip_if_busy)
780{ 780{
781 int next_memcg_id = 0; 781 struct bdi_writeback *last_wb = NULL;
782 struct bdi_writeback *wb; 782 struct bdi_writeback *wb = list_entry(&bdi->wb_list,
783 struct wb_iter iter; 783 struct bdi_writeback, bdi_node);
784 784
785 might_sleep(); 785 might_sleep();
786restart: 786restart:
787 rcu_read_lock(); 787 rcu_read_lock();
788 bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) { 788 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
789 DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done); 789 DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
790 struct wb_writeback_work fallback_work; 790 struct wb_writeback_work fallback_work;
791 struct wb_writeback_work *work; 791 struct wb_writeback_work *work;
792 long nr_pages; 792 long nr_pages;
793 793
794 if (last_wb) {
795 wb_put(last_wb);
796 last_wb = NULL;
797 }
798
794 /* SYNC_ALL writes out I_DIRTY_TIME too */ 799 /* SYNC_ALL writes out I_DIRTY_TIME too */
795 if (!wb_has_dirty_io(wb) && 800 if (!wb_has_dirty_io(wb) &&
796 (base_work->sync_mode == WB_SYNC_NONE || 801 (base_work->sync_mode == WB_SYNC_NONE ||
@@ -819,12 +824,22 @@ restart:
819 824
820 wb_queue_work(wb, work); 825 wb_queue_work(wb, work);
821 826
822 next_memcg_id = wb->memcg_css->id + 1; 827 /*
828 * Pin @wb so that it stays on @bdi->wb_list. This allows
829 * continuing iteration from @wb after dropping and
830 * regrabbing rcu read lock.
831 */
832 wb_get(wb);
833 last_wb = wb;
834
823 rcu_read_unlock(); 835 rcu_read_unlock();
824 wb_wait_for_completion(bdi, &fallback_work_done); 836 wb_wait_for_completion(bdi, &fallback_work_done);
825 goto restart; 837 goto restart;
826 } 838 }
827 rcu_read_unlock(); 839 rcu_read_unlock();
840
841 if (last_wb)
842 wb_put(last_wb);
828} 843}
829 844
830#else /* CONFIG_CGROUP_WRITEBACK */ 845#else /* CONFIG_CGROUP_WRITEBACK */
@@ -1857,12 +1872,11 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
1857 rcu_read_lock(); 1872 rcu_read_lock();
1858 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1873 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1859 struct bdi_writeback *wb; 1874 struct bdi_writeback *wb;
1860 struct wb_iter iter;
1861 1875
1862 if (!bdi_has_dirty_io(bdi)) 1876 if (!bdi_has_dirty_io(bdi))
1863 continue; 1877 continue;
1864 1878
1865 bdi_for_each_wb(wb, bdi, &iter, 0) 1879 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
1866 wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), 1880 wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
1867 false, reason); 1881 false, reason);
1868 } 1882 }
@@ -1894,11 +1908,10 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
1894 rcu_read_lock(); 1908 rcu_read_lock();
1895 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1909 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1896 struct bdi_writeback *wb; 1910 struct bdi_writeback *wb;
1897 struct wb_iter iter;
1898 1911
1899 bdi_for_each_wb(wb, bdi, &iter, 0) 1912 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
1900 if (!list_empty(&bdi->wb.b_dirty_time)) 1913 if (!list_empty(&wb->b_dirty_time))
1901 wb_wakeup(&bdi->wb); 1914 wb_wakeup(wb);
1902 } 1915 }
1903 rcu_read_unlock(); 1916 rcu_read_unlock();
1904 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 1917 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
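
bdi_split_work_to_wbs() and the wakeup paths now walk bdi->wb_list with the plain RCU list helpers instead of the removed bdi_for_each_wb()/wb_iter machinery; when the split-work loop has to sleep in wb_wait_for_completion(), it takes a reference on the current wb so the node stays on the list and iteration can continue from it after re-acquiring the RCU read lock. A condensed kernel-style sketch of that pin-and-resume pattern (needs_work(), must_wait() and wait_for_done() are placeholders for the real checks and wb_wait_for_completion()):

static void split_work_sketch(struct backing_dev_info *bdi)
{
        struct bdi_writeback *last_wb = NULL;
        struct bdi_writeback *wb = list_entry(&bdi->wb_list,
                                              struct bdi_writeback, bdi_node);

restart:
        rcu_read_lock();
        list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
                if (last_wb) {          /* drop the pin taken on the previous pass */
                        wb_put(last_wb);
                        last_wb = NULL;
                }

                if (!needs_work(wb))
                        continue;

                if (must_wait(wb)) {
                        wb_get(wb);     /* keep wb alive and on the list while we sleep */
                        last_wb = wb;
                        rcu_read_unlock();
                        wait_for_done(wb);
                        goto restart;   /* resume right after wb, kept valid by the ref */
                }
        }
        rcu_read_unlock();

        if (last_wb)
                wb_put(last_wb);
}
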
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ee5aa4daaea0..ce38b4ccc9ab 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1658,12 +1658,13 @@ send_response:
1658 if (ret < 0) { 1658 if (ret < 0) {
1659 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1659 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1660 response = DLM_MASTER_RESP_ERROR; 1660 response = DLM_MASTER_RESP_ERROR;
1661 spin_unlock(&res->spinlock);
1661 dlm_lockres_put(res); 1662 dlm_lockres_put(res);
1662 } else { 1663 } else {
1663 dispatched = 1; 1664 dispatched = 1;
1664 __dlm_lockres_grab_inflight_worker(dlm, res); 1665 __dlm_lockres_grab_inflight_worker(dlm, res);
1666 spin_unlock(&res->spinlock);
1665 } 1667 }
1666 spin_unlock(&res->spinlock);
1667 } else { 1668 } else {
1668 if (res) 1669 if (res)
1669 dlm_lockres_put(res); 1670 dlm_lockres_put(res);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 3d90ad7ff91f..58eaa5c0d387 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1723,8 +1723,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1723 } else { 1723 } else {
1724 dispatched = 1; 1724 dispatched = 1;
1725 __dlm_lockres_grab_inflight_worker(dlm, res); 1725 __dlm_lockres_grab_inflight_worker(dlm, res);
1726 spin_unlock(&res->spinlock);
1726 } 1727 }
1727 spin_unlock(&res->spinlock);
1728 } else { 1728 } else {
1729 /* put.. incase we are not the master */ 1729 /* put.. incase we are not the master */
1730 spin_unlock(&res->spinlock); 1730 spin_unlock(&res->spinlock);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 84d693d37428..871fcb67be97 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -81,11 +81,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
81 if (len == 0) 81 if (len == 0)
82 return 0; 82 return 0;
83 83
84 old_file = ovl_path_open(old, O_RDONLY); 84 old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
85 if (IS_ERR(old_file)) 85 if (IS_ERR(old_file))
86 return PTR_ERR(old_file); 86 return PTR_ERR(old_file);
87 87
88 new_file = ovl_path_open(new, O_WRONLY); 88 new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
89 if (IS_ERR(new_file)) { 89 if (IS_ERR(new_file)) {
90 error = PTR_ERR(new_file); 90 error = PTR_ERR(new_file);
91 goto out_fput; 91 goto out_fput;
@@ -267,7 +267,7 @@ out:
267 267
268out_cleanup: 268out_cleanup:
269 ovl_cleanup(wdir, newdentry); 269 ovl_cleanup(wdir, newdentry);
270 goto out; 270 goto out2;
271} 271}
272 272
273/* 273/*
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index d9da5a4e9382..ec0c2a050043 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -363,6 +363,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
363 ovl_path_upper(dentry, &realpath); 363 ovl_path_upper(dentry, &realpath);
364 } 364 }
365 365
366 if (realpath.dentry->d_flags & DCACHE_OP_SELECT_INODE)
367 return realpath.dentry->d_op->d_select_inode(realpath.dentry, file_flags);
368
366 return d_backing_inode(realpath.dentry); 369 return d_backing_inode(realpath.dentry);
367} 370}
368 371
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 79073d68b475..e38ee0fed24a 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -544,6 +544,7 @@ static void ovl_put_super(struct super_block *sb)
544 mntput(ufs->upper_mnt); 544 mntput(ufs->upper_mnt);
545 for (i = 0; i < ufs->numlower; i++) 545 for (i = 0; i < ufs->numlower; i++)
546 mntput(ufs->lower_mnt[i]); 546 mntput(ufs->lower_mnt[i]);
547 kfree(ufs->lower_mnt);
547 548
548 kfree(ufs->config.lowerdir); 549 kfree(ufs->config.lowerdir);
549 kfree(ufs->config.upperdir); 550 kfree(ufs->config.upperdir);
@@ -1048,6 +1049,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1048 oe->lowerstack[i].dentry = stack[i].dentry; 1049 oe->lowerstack[i].dentry = stack[i].dentry;
1049 oe->lowerstack[i].mnt = ufs->lower_mnt[i]; 1050 oe->lowerstack[i].mnt = ufs->lower_mnt[i];
1050 } 1051 }
1052 kfree(stack);
1051 1053
1052 root_dentry->d_fsdata = oe; 1054 root_dentry->d_fsdata = oe;
1053 1055
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f60f0121e331..eed2050db9be 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -375,7 +375,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
375static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, 375static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
376 struct pid *pid, struct task_struct *task, int whole) 376 struct pid *pid, struct task_struct *task, int whole)
377{ 377{
378 unsigned long vsize, eip, esp, wchan = ~0UL; 378 unsigned long vsize, eip, esp, wchan = 0;
379 int priority, nice; 379 int priority, nice;
380 int tty_pgrp = -1, tty_nr = 0; 380 int tty_pgrp = -1, tty_nr = 0;
381 sigset_t sigign, sigcatch; 381 sigset_t sigign, sigcatch;
@@ -507,7 +507,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
507 seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL); 507 seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
508 seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL); 508 seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
509 seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL); 509 seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
510 seq_put_decimal_ull(m, ' ', wchan); 510
511 /*
512 * We used to output the absolute kernel address, but that's an
513 * information leak - so instead we show a 0/1 flag here, to signal
514 * to user-space whether there's a wchan field in /proc/PID/wchan.
515 *
516 * This works with older implementations of procps as well.
517 */
518 if (wchan)
519 seq_puts(m, " 1");
520 else
521 seq_puts(m, " 0");
522
511 seq_put_decimal_ull(m, ' ', 0); 523 seq_put_decimal_ull(m, ' ', 0);
512 seq_put_decimal_ull(m, ' ', 0); 524 seq_put_decimal_ull(m, ' ', 0);
513 seq_put_decimal_ll(m, ' ', task->exit_signal); 525 seq_put_decimal_ll(m, ' ', task->exit_signal);
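
The /proc/PID/stat change stops printing the raw wchan kernel address (an information leak) and emits a 0/1 flag in the same field instead, which older procps versions still parse; the symbolic name remains available from /proc/PID/wchan, now gated on ptrace access. A small userspace reader of the flag, assuming wchan is field 35 as documented in proc(5):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[4096];
        FILE *f = fopen("/proc/self/stat", "r");
        size_t n;
        char *p;
        int field;

        if (!f)
                return 1;
        n = fread(buf, 1, sizeof(buf) - 1, f);
        fclose(f);
        buf[n] = '\0';

        p = strrchr(buf, ')');          /* comm may contain spaces: skip past it */
        if (!p)
                return 1;
        p += 2;                         /* now at field 3 ("state") */

        for (field = 3; field < 35; field++) {
                p = strchr(p, ' ');
                if (!p)
                        return 1;
                p++;
        }

        printf("wchan flag: %.*s\n", (int)strcspn(p, " "), p);
        return 0;
}
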
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b25eee4cead5..29595af32866 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -430,13 +430,10 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
430 430
431 wchan = get_wchan(task); 431 wchan = get_wchan(task);
432 432
433 if (lookup_symbol_name(wchan, symname) < 0) { 433 if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
434 if (!ptrace_may_access(task, PTRACE_MODE_READ))
435 return 0;
436 seq_printf(m, "%lu", wchan);
437 } else {
438 seq_printf(m, "%s", symname); 434 seq_printf(m, "%s", symname);
439 } 435 else
436 seq_putc(m, '0');
440 437
441 return 0; 438 return 0;
442} 439}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index d3ebf2e61853..9155a5a0d3b9 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -27,7 +27,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
27{ 27{
28 struct sysinfo i; 28 struct sysinfo i;
29 unsigned long committed; 29 unsigned long committed;
30 struct vmalloc_info vmi;
31 long cached; 30 long cached;
32 long available; 31 long available;
33 unsigned long pagecache; 32 unsigned long pagecache;
@@ -49,8 +48,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
49 if (cached < 0) 48 if (cached < 0)
50 cached = 0; 49 cached = 0;
51 50
52 get_vmalloc_info(&vmi);
53
54 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 51 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
55 pages[lru] = global_page_state(NR_LRU_BASE + lru); 52 pages[lru] = global_page_state(NR_LRU_BASE + lru);
56 53
@@ -191,8 +188,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
191 K(vm_commit_limit()), 188 K(vm_commit_limit()),
192 K(committed), 189 K(committed),
193 (unsigned long)VMALLOC_TOTAL >> 10, 190 (unsigned long)VMALLOC_TOTAL >> 10,
194 vmi.used >> 10, 191 0ul, // used to be vmalloc 'used'
195 vmi.largest_chunk >> 10 192 0ul // used to be vmalloc 'largest_chunk'
196#ifdef CONFIG_MEMORY_FAILURE 193#ifdef CONFIG_MEMORY_FAILURE
197 , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10) 194 , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
198#endif 195#endif
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index a94cbebbc33d..eb1973bad80b 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -35,7 +35,7 @@ typedef atomic_t atomic_long_t;
35#endif 35#endif
36 36
37#define ATOMIC_LONG_READ_OP(mo) \ 37#define ATOMIC_LONG_READ_OP(mo) \
38static inline long atomic_long_read##mo(atomic_long_t *l) \ 38static inline long atomic_long_read##mo(const atomic_long_t *l) \
39{ \ 39{ \
40 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ 40 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
41 \ 41 \
@@ -112,19 +112,23 @@ static inline void atomic_long_dec(atomic_long_t *l)
112 ATOMIC_LONG_PFX(_dec)(v); 112 ATOMIC_LONG_PFX(_dec)(v);
113} 113}
114 114
115static inline void atomic_long_add(long i, atomic_long_t *l) 115#define ATOMIC_LONG_OP(op) \
116{ 116static inline void \
117 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; 117atomic_long_##op(long i, atomic_long_t *l) \
118 118{ \
119 ATOMIC_LONG_PFX(_add)(i, v); 119 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
120 \
121 ATOMIC_LONG_PFX(_##op)(i, v); \
120} 122}
121 123
122static inline void atomic_long_sub(long i, atomic_long_t *l) 124ATOMIC_LONG_OP(add)
123{ 125ATOMIC_LONG_OP(sub)
124 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; 126ATOMIC_LONG_OP(and)
127ATOMIC_LONG_OP(or)
128ATOMIC_LONG_OP(xor)
129ATOMIC_LONG_OP(andnot)
125 130
126 ATOMIC_LONG_PFX(_sub)(i, v); 131#undef ATOMIC_LONG_OP
127}
128 132
129static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) 133static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
130{ 134{
@@ -154,19 +158,24 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
154 return ATOMIC_LONG_PFX(_add_negative)(i, v); 158 return ATOMIC_LONG_PFX(_add_negative)(i, v);
155} 159}
156 160
157static inline long atomic_long_inc_return(atomic_long_t *l) 161#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
158{ 162static inline long \
159 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; 163atomic_long_##op##_return##mo(atomic_long_t *l) \
160 164{ \
161 return (long)ATOMIC_LONG_PFX(_inc_return)(v); 165 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
162} 166 \
163 167 return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
164static inline long atomic_long_dec_return(atomic_long_t *l)
165{
166 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
167
168 return (long)ATOMIC_LONG_PFX(_dec_return)(v);
169} 168}
169ATOMIC_LONG_INC_DEC_OP(inc,)
170ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
171ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
172ATOMIC_LONG_INC_DEC_OP(inc, _release)
173ATOMIC_LONG_INC_DEC_OP(dec,)
174ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
175ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
176ATOMIC_LONG_INC_DEC_OP(dec, _release)
177
178#undef ATOMIC_LONG_INC_DEC_OP
170 179
171static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 180static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
172{ 181{
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index d4d7e337fdcb..74f1a3704d7a 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -127,7 +127,7 @@ ATOMIC_OP(xor, ^)
127 * Atomically reads the value of @v. 127 * Atomically reads the value of @v.
128 */ 128 */
129#ifndef atomic_read 129#ifndef atomic_read
130#define atomic_read(v) ACCESS_ONCE((v)->counter) 130#define atomic_read(v) READ_ONCE((v)->counter)
131#endif 131#endif
132 132
133/** 133/**
@@ -137,7 +137,7 @@ ATOMIC_OP(xor, ^)
137 * 137 *
138 * Atomically sets the value of @v to @i. 138 * Atomically sets the value of @v to @i.
139 */ 139 */
140#define atomic_set(v, i) (((v)->counter) = (i)) 140#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
141 141
142#include <linux/irqflags.h> 142#include <linux/irqflags.h>
143 143
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index d4f9fb4e53df..fd694cfd678a 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
20static inline void 20static inline void
21__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) 21__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
22{ 22{
23 if (unlikely(atomic_dec_return(count) < 0)) 23 if (unlikely(atomic_dec_return_acquire(count) < 0))
24 fail_fn(count); 24 fail_fn(count);
25} 25}
26 26
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
35static inline int 35static inline int
36__mutex_fastpath_lock_retval(atomic_t *count) 36__mutex_fastpath_lock_retval(atomic_t *count)
37{ 37{
38 if (unlikely(atomic_dec_return(count) < 0)) 38 if (unlikely(atomic_dec_return_acquire(count) < 0))
39 return -1; 39 return -1;
40 return 0; 40 return 0;
41} 41}
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
56static inline void 56static inline void
57__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) 57__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
58{ 58{
59 if (unlikely(atomic_inc_return(count) <= 0)) 59 if (unlikely(atomic_inc_return_release(count) <= 0))
60 fail_fn(count); 60 fail_fn(count);
61} 61}
62 62
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
80static inline int 80static inline int
81__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) 81__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
82{ 82{
83 if (likely(atomic_cmpxchg(count, 1, 0) == 1)) 83 if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
84 return 1; 84 return 1;
85 return 0; 85 return 0;
86} 86}
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index f169ec064785..a6b4a7bd6ac9 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
31 * to ensure that any waiting tasks are woken up by the 31 * to ensure that any waiting tasks are woken up by the
32 * unlock slow path. 32 * unlock slow path.
33 */ 33 */
34 if (likely(atomic_xchg(count, -1) != 1)) 34 if (likely(atomic_xchg_acquire(count, -1) != 1))
35 fail_fn(count); 35 fail_fn(count);
36} 36}
37 37
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
46static inline int 46static inline int
47__mutex_fastpath_lock_retval(atomic_t *count) 47__mutex_fastpath_lock_retval(atomic_t *count)
48{ 48{
49 if (unlikely(atomic_xchg(count, 0) != 1)) 49 if (unlikely(atomic_xchg_acquire(count, 0) != 1))
50 if (likely(atomic_xchg(count, -1) != 1)) 50 if (likely(atomic_xchg(count, -1) != 1))
51 return -1; 51 return -1;
52 return 0; 52 return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
67static inline void 67static inline void
68__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) 68__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
69{ 69{
70 if (unlikely(atomic_xchg(count, 1) != 0)) 70 if (unlikely(atomic_xchg_release(count, 1) != 0))
71 fail_fn(count); 71 fail_fn(count);
72} 72}
73 73
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
91static inline int 91static inline int
92__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) 92__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
93{ 93{
94 int prev = atomic_xchg(count, 0); 94 int prev = atomic_xchg_acquire(count, 0);
95 95
96 if (unlikely(prev < 0)) { 96 if (unlikely(prev < 0)) {
97 /* 97 /*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
105 * owner's unlock path needlessly, but that's not a problem 105 * owner's unlock path needlessly, but that's not a problem
106 * in practice. ] 106 * in practice. ]
107 */ 107 */
108 prev = atomic_xchg(count, prev); 108 prev = atomic_xchg_acquire(count, prev);
109 if (prev < 0) 109 if (prev < 0)
110 prev = 0; 110 prev = 0;
111 } 111 }
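
Both generic mutex fastpath headers switch to _acquire on the lock/trylock side and _release on unlock, which is all the ordering a lock word needs. The xchg-based trylock additionally has to put a negative (contended) value back if it grabbed one by mistake, so the owner still takes the slow unlock path and wakes waiters. A C11 userspace model of that trylock (1 = unlocked, 0 = locked, negative = locked with waiters; a sketch, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

static int fastpath_trylock(atomic_int *count)
{
        int prev = atomic_exchange_explicit(count, 0, memory_order_acquire);

        if (prev < 0) {
                /* We raced with a contended lock: restore the negative value
                 * so the owner still walks the slow unlock path and wakes
                 * waiters; report success only if it became unlocked (1)
                 * in between. */
                prev = atomic_exchange_explicit(count, prev, memory_order_acquire);
                if (prev < 0)
                        prev = 0;
        }
        return prev == 1;       /* acquired only if it was previously unlocked */
}

int main(void)
{
        atomic_int count;

        atomic_init(&count, 1);                         /* unlocked */
        printf("%d\n", fastpath_trylock(&count));       /* 1: got it         */
        printf("%d\n", fastpath_trylock(&count));       /* 0: already locked */
        return 0;
}
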
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 29c57b2cb344..3eabbbbfd578 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -30,9 +30,19 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma,
30#endif 30#endif
31 31
32#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 32#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
33#ifdef CONFIG_TRANSPARENT_HUGEPAGE
33extern int pmdp_set_access_flags(struct vm_area_struct *vma, 34extern int pmdp_set_access_flags(struct vm_area_struct *vma,
34 unsigned long address, pmd_t *pmdp, 35 unsigned long address, pmd_t *pmdp,
35 pmd_t entry, int dirty); 36 pmd_t entry, int dirty);
37#else
38static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
39 unsigned long address, pmd_t *pmdp,
40 pmd_t entry, int dirty)
41{
42 BUILD_BUG();
43 return 0;
44}
45#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
36#endif 46#endif
37 47
38#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 48#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -64,12 +74,12 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
64 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); 74 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
65 return r; 75 return r;
66} 76}
67#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 77#else
68static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 78static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
69 unsigned long address, 79 unsigned long address,
70 pmd_t *pmdp) 80 pmd_t *pmdp)
71{ 81{
72 BUG(); 82 BUILD_BUG();
73 return 0; 83 return 0;
74} 84}
75#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 85#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -81,8 +91,21 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
81#endif 91#endif
82 92
83#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH 93#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
84int pmdp_clear_flush_young(struct vm_area_struct *vma, 94#ifdef CONFIG_TRANSPARENT_HUGEPAGE
85 unsigned long address, pmd_t *pmdp); 95extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
96 unsigned long address, pmd_t *pmdp);
97#else
98/*
99 * Despite relevant to THP only, this API is called from generic rmap code
100 * under PageTransHuge(), hence needs a dummy implementation for !THP
101 */
102static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
103 unsigned long address, pmd_t *pmdp)
104{
105 BUILD_BUG();
106 return 0;
107}
108#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
86#endif 109#endif
87 110
88#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR 111#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -175,11 +198,11 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
175 pmd_t old_pmd = *pmdp; 198 pmd_t old_pmd = *pmdp;
176 set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); 199 set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
177} 200}
178#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 201#else
179static inline void pmdp_set_wrprotect(struct mm_struct *mm, 202static inline void pmdp_set_wrprotect(struct mm_struct *mm,
180 unsigned long address, pmd_t *pmdp) 203 unsigned long address, pmd_t *pmdp)
181{ 204{
182 BUG(); 205 BUILD_BUG();
183} 206}
184#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 207#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
185#endif 208#endif
@@ -248,7 +271,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
248#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 271#else /* CONFIG_TRANSPARENT_HUGEPAGE */
249static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) 272static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
250{ 273{
251 BUG(); 274 BUILD_BUG();
252 return 0; 275 return 0;
253} 276}
254#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 277#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
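
The pgtable.h stubs for !CONFIG_TRANSPARENT_HUGEPAGE move from a runtime BUG() to BUILD_BUG(), which breaks the build if any call survives code generation; that is safe because, as the new comment notes, every caller sits behind a THP-only condition the compiler eliminates when THP is off. A rough userspace approximation using GCC's error attribute (requires optimization, e.g. gcc -O2, so the dead call really is removed, which is how kernel builds behave; names invented):

/* The attribute makes the build fail only if a call to dead_code()
 * is actually emitted; calls proven dead are harmless. */
extern void dead_code(void) __attribute__((error("reached code that should be compiled out")));

#define THP_ENABLED 0   /* stand-in for CONFIG_TRANSPARENT_HUGEPAGE */

static inline int pmd_stub(void)
{
        dead_code();    /* equivalent of BUILD_BUG() in the stub */
        return 0;
}

int main(void)
{
        if (THP_ENABLED)        /* constant-false: the call below is eliminated */
                return pmd_stub();
        return 0;
}
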
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 0bec580a4885..5d8ffa3e6f8c 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -24,7 +24,7 @@ static __always_inline void preempt_count_set(int pc)
24 * must be macros to avoid header recursion hell 24 * must be macros to avoid header recursion hell
25 */ 25 */
26#define init_task_preempt_count(p) do { \ 26#define init_task_preempt_count(p) do { \
27 task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ 27 task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
28} while (0) 28} while (0)
29 29
30#define init_idle_preempt_count(p, cpu) do { \ 30#define init_idle_preempt_count(p, cpu) do { \
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 4d76f24df518..0abc6b6062fb 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -10,12 +10,12 @@
10 10
11typedef struct qrwlock { 11typedef struct qrwlock {
12 atomic_t cnts; 12 atomic_t cnts;
13 arch_spinlock_t lock; 13 arch_spinlock_t wait_lock;
14} arch_rwlock_t; 14} arch_rwlock_t;
15 15
16#define __ARCH_RW_LOCK_UNLOCKED { \ 16#define __ARCH_RW_LOCK_UNLOCKED { \
17 .cnts = ATOMIC_INIT(0), \ 17 .cnts = ATOMIC_INIT(0), \
18 .lock = __ARCH_SPIN_LOCK_UNLOCKED, \ 18 .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
19} 19}
20 20
21#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */ 21#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d48bf5a95cc1..d6d5dc98d7da 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
33 */ 33 */
34static inline void __down_read(struct rw_semaphore *sem) 34static inline void __down_read(struct rw_semaphore *sem)
35{ 35{
36 if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0)) 36 if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
37 rwsem_down_read_failed(sem); 37 rwsem_down_read_failed(sem);
38} 38}
39 39
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
42 long tmp; 42 long tmp;
43 43
44 while ((tmp = sem->count) >= 0) { 44 while ((tmp = sem->count) >= 0) {
45 if (tmp == cmpxchg(&sem->count, tmp, 45 if (tmp == cmpxchg_acquire(&sem->count, tmp,
46 tmp + RWSEM_ACTIVE_READ_BIAS)) { 46 tmp + RWSEM_ACTIVE_READ_BIAS)) {
47 return 1; 47 return 1;
48 } 48 }
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
57{ 57{
58 long tmp; 58 long tmp;
59 59
60 tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS, 60 tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
61 (atomic_long_t *)&sem->count); 61 (atomic_long_t *)&sem->count);
62 if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) 62 if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
63 rwsem_down_write_failed(sem); 63 rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
72{ 72{
73 long tmp; 73 long tmp;
74 74
75 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, 75 tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
76 RWSEM_ACTIVE_WRITE_BIAS); 76 RWSEM_ACTIVE_WRITE_BIAS);
77 return tmp == RWSEM_UNLOCKED_VALUE; 77 return tmp == RWSEM_UNLOCKED_VALUE;
78} 78}
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
84{ 84{
85 long tmp; 85 long tmp;
86 86
87 tmp = atomic_long_dec_return((atomic_long_t *)&sem->count); 87 tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
88 if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) 88 if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
89 rwsem_wake(sem); 89 rwsem_wake(sem);
90} 90}
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
94 */ 94 */
95static inline void __up_write(struct rw_semaphore *sem) 95static inline void __up_write(struct rw_semaphore *sem)
96{ 96{
97 if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, 97 if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
98 (atomic_long_t *)&sem->count) < 0)) 98 (atomic_long_t *)&sem->count) < 0))
99 rwsem_wake(sem); 99 rwsem_wake(sem);
100} 100}
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
114{ 114{
115 long tmp; 115 long tmp;
116 116
117 tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS, 117 /*
118 * When downgrading from exclusive to shared ownership,
119 * anything inside the write-locked region cannot leak
120 * into the read side. In contrast, anything in the
121 * read-locked region is ok to be re-ordered into the
122 * write side. As such, rely on RELEASE semantics.
123 */
124 tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
118 (atomic_long_t *)&sem->count); 125 (atomic_long_t *)&sem->count);
119 if (tmp < 0) 126 if (tmp < 0)
120 rwsem_downgrade_wake(sem); 127 rwsem_downgrade_wake(sem);
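
The generic rwsem fastpaths likewise move to acquire/release-qualified atomics, and the new comment spells out why __downgrade_write() only needs RELEASE: writes done under the exclusive lock must be visible before readers observe the new count, while reads leaking backwards into the write-locked region are harmless. A C11 model of that add_return_release step (the bias values are placeholders, not the kernel's RWSEM_* constants):

#include <stdatomic.h>
#include <stdio.h>

#define WRITER_HELD     (-1L)   /* placeholder: exclusive owner, no readers */
#define ONE_READER      (1L)

static long add_return_release(long i, atomic_long *v)
{
        /* fetch_add returns the old value; the kernel's *_return helpers
         * return the new one, hence the "+ i". RELEASE publishes everything
         * written under the exclusive lock before the new count is seen. */
        return atomic_fetch_add_explicit(v, i, memory_order_release) + i;
}

int main(void)
{
        atomic_long count;

        atomic_init(&count, WRITER_HELD);
        /* Downgrade: swap "one writer" for "one reader" in a single RELEASE op. */
        printf("count after downgrade: %ld\n",
               add_return_release(ONE_READER - WRITER_HELD, &count));   /* 1 */
        return 0;
}
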
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index e6b1e0a808ae..c673d2c87c60 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -9,7 +9,19 @@
9#ifndef _DT_BINDINGS_GPIO_GPIO_H 9#ifndef _DT_BINDINGS_GPIO_GPIO_H
10#define _DT_BINDINGS_GPIO_GPIO_H 10#define _DT_BINDINGS_GPIO_GPIO_H
11 11
12/* Bit 0 express polarity */
12#define GPIO_ACTIVE_HIGH 0 13#define GPIO_ACTIVE_HIGH 0
13#define GPIO_ACTIVE_LOW 1 14#define GPIO_ACTIVE_LOW 1
14 15
16/* Bit 1 express single-endedness */
17#define GPIO_PUSH_PULL 0
18#define GPIO_SINGLE_ENDED 2
19
20/*
21 * Open Drain/Collector is the combination of single-ended active low,
22 * Open Source/Emitter is the combination of single-ended active high.
23 */
24#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_ACTIVE_LOW)
25#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_ACTIVE_HIGH)
26
15#endif 27#endif
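
The GPIO binding header now documents bit 0 of the flag cell as polarity and bit 1 as single-endedness, with open drain and open source expressed as the two single-ended/polarity combinations. A userspace sketch of how a consumer might decode such a cell (the real parsing lives in gpiolib's OF code; the function here is invented):

#include <stdbool.h>
#include <stdio.h>

#define GPIO_ACTIVE_LOW         1       /* bit 0: polarity         */
#define GPIO_SINGLE_ENDED       2       /* bit 1: single-endedness */
#define GPIO_OPEN_DRAIN         (GPIO_SINGLE_ENDED | GPIO_ACTIVE_LOW)
#define GPIO_OPEN_SOURCE        (GPIO_SINGLE_ENDED | 0)

static void describe(unsigned int cell)
{
        bool active_low   = cell & GPIO_ACTIVE_LOW;
        bool single_ended = cell & GPIO_SINGLE_ENDED;

        printf("flags 0x%x: %s, %s\n", cell,
               active_low ? "active-low" : "active-high",
               !single_ended ? "push-pull" :
               active_low ? "open-drain" : "open-source");
}

int main(void)
{
        describe(GPIO_OPEN_DRAIN);      /* active-low, open-drain   */
        describe(GPIO_OPEN_SOURCE);     /* active-high, open-source */
        return 0;
}
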
diff --git a/include/dt-bindings/leds/leds-netxbig.h b/include/dt-bindings/leds/leds-netxbig.h
new file mode 100644
index 000000000000..92658b0310b2
--- /dev/null
+++ b/include/dt-bindings/leds/leds-netxbig.h
@@ -0,0 +1,18 @@
1/*
2 * This header provides constants for netxbig LED bindings.
3 *
4 * This file is licensed under the terms of the GNU General Public
5 * License version 2. This program is licensed "as is" without any
6 * warranty of any kind, whether express or implied.
7 */
8
9#ifndef _DT_BINDINGS_LEDS_NETXBIG_H
10#define _DT_BINDINGS_LEDS_NETXBIG_H
11
12#define NETXBIG_LED_OFF 0
13#define NETXBIG_LED_ON 1
14#define NETXBIG_LED_SATA 2
15#define NETXBIG_LED_TIMER1 3
16#define NETXBIG_LED_TIMER2 4
17
18#endif /* _DT_BINDINGS_LEDS_NETXBIG_H */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4e14dac282bb..6a3538ef7275 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -282,7 +282,7 @@ struct vgic_v2_cpu_if {
282}; 282};
283 283
284struct vgic_v3_cpu_if { 284struct vgic_v3_cpu_if {
285#ifdef CONFIG_ARM_GIC_V3 285#ifdef CONFIG_KVM_ARM_VGIC_V3
286 u32 vgic_hcr; 286 u32 vgic_hcr;
287 u32 vgic_vmcr; 287 u32 vgic_vmcr;
288 u32 vgic_sre; /* Restored only, change ignored */ 288 u32 vgic_sre; /* Restored only, change ignored */
@@ -364,7 +364,7 @@ void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
364int vgic_v2_probe(struct device_node *vgic_node, 364int vgic_v2_probe(struct device_node *vgic_node,
365 const struct vgic_ops **ops, 365 const struct vgic_ops **ops,
366 const struct vgic_params **params); 366 const struct vgic_params **params);
367#ifdef CONFIG_ARM_GIC_V3 367#ifdef CONFIG_KVM_ARM_VGIC_V3
368int vgic_v3_probe(struct device_node *vgic_node, 368int vgic_v3_probe(struct device_node *vgic_node,
369 const struct vgic_ops **ops, 369 const struct vgic_ops **ops,
370 const struct vgic_params **params); 370 const struct vgic_params **params);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 43856d19cf4d..d863e12bbead 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -201,6 +201,9 @@ int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity
201int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); 201int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
202int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); 202int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
203 203
204void acpi_set_irq_model(enum acpi_irq_model_id model,
205 struct fwnode_handle *fwnode);
206
204#ifdef CONFIG_X86_IO_APIC 207#ifdef CONFIG_X86_IO_APIC
205extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); 208extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
206#else 209#else
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 50fc66868402..9006c4e75cf7 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -41,8 +41,6 @@ struct amba_driver {
41 int (*probe)(struct amba_device *, const struct amba_id *); 41 int (*probe)(struct amba_device *, const struct amba_id *);
42 int (*remove)(struct amba_device *); 42 int (*remove)(struct amba_device *);
43 void (*shutdown)(struct amba_device *); 43 void (*shutdown)(struct amba_device *);
44 int (*suspend)(struct amba_device *, pm_message_t);
45 int (*resume)(struct amba_device *);
46 const struct amba_id *id_table; 44 const struct amba_id *id_table;
47}; 45};
48 46
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 00a5763e850e..301de78d65f7 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -81,6 +81,30 @@
81#endif 81#endif
82#endif /* atomic_add_return_relaxed */ 82#endif /* atomic_add_return_relaxed */
83 83
84/* atomic_inc_return_relaxed */
85#ifndef atomic_inc_return_relaxed
86#define atomic_inc_return_relaxed atomic_inc_return
87#define atomic_inc_return_acquire atomic_inc_return
88#define atomic_inc_return_release atomic_inc_return
89
90#else /* atomic_inc_return_relaxed */
91
92#ifndef atomic_inc_return_acquire
93#define atomic_inc_return_acquire(...) \
94 __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
95#endif
96
97#ifndef atomic_inc_return_release
98#define atomic_inc_return_release(...) \
99 __atomic_op_release(atomic_inc_return, __VA_ARGS__)
100#endif
101
102#ifndef atomic_inc_return
103#define atomic_inc_return(...) \
104 __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
105#endif
106#endif /* atomic_inc_return_relaxed */
107
84/* atomic_sub_return_relaxed */ 108/* atomic_sub_return_relaxed */
85#ifndef atomic_sub_return_relaxed 109#ifndef atomic_sub_return_relaxed
86#define atomic_sub_return_relaxed atomic_sub_return 110#define atomic_sub_return_relaxed atomic_sub_return
@@ -105,6 +129,30 @@
105#endif 129#endif
106#endif /* atomic_sub_return_relaxed */ 130#endif /* atomic_sub_return_relaxed */
107 131
132/* atomic_dec_return_relaxed */
133#ifndef atomic_dec_return_relaxed
134#define atomic_dec_return_relaxed atomic_dec_return
135#define atomic_dec_return_acquire atomic_dec_return
136#define atomic_dec_return_release atomic_dec_return
137
138#else /* atomic_dec_return_relaxed */
139
140#ifndef atomic_dec_return_acquire
141#define atomic_dec_return_acquire(...) \
142 __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
143#endif
144
145#ifndef atomic_dec_return_release
146#define atomic_dec_return_release(...) \
147 __atomic_op_release(atomic_dec_return, __VA_ARGS__)
148#endif
149
150#ifndef atomic_dec_return
151#define atomic_dec_return(...) \
152 __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
153#endif
154#endif /* atomic_dec_return_relaxed */
155
108/* atomic_xchg_relaxed */ 156/* atomic_xchg_relaxed */
109#ifndef atomic_xchg_relaxed 157#ifndef atomic_xchg_relaxed
110#define atomic_xchg_relaxed atomic_xchg 158#define atomic_xchg_relaxed atomic_xchg
@@ -185,6 +233,31 @@
185#endif 233#endif
186#endif /* atomic64_add_return_relaxed */ 234#endif /* atomic64_add_return_relaxed */
187 235
236/* atomic64_inc_return_relaxed */
237#ifndef atomic64_inc_return_relaxed
238#define atomic64_inc_return_relaxed atomic64_inc_return
239#define atomic64_inc_return_acquire atomic64_inc_return
240#define atomic64_inc_return_release atomic64_inc_return
241
242#else /* atomic64_inc_return_relaxed */
243
244#ifndef atomic64_inc_return_acquire
245#define atomic64_inc_return_acquire(...) \
246 __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
247#endif
248
249#ifndef atomic64_inc_return_release
250#define atomic64_inc_return_release(...) \
251 __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
252#endif
253
254#ifndef atomic64_inc_return
255#define atomic64_inc_return(...) \
256 __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
257#endif
258#endif /* atomic64_inc_return_relaxed */
259
260
188/* atomic64_sub_return_relaxed */ 261/* atomic64_sub_return_relaxed */
189#ifndef atomic64_sub_return_relaxed 262#ifndef atomic64_sub_return_relaxed
190#define atomic64_sub_return_relaxed atomic64_sub_return 263#define atomic64_sub_return_relaxed atomic64_sub_return
@@ -209,6 +282,30 @@
209#endif 282#endif
210#endif /* atomic64_sub_return_relaxed */ 283#endif /* atomic64_sub_return_relaxed */
211 284
285/* atomic64_dec_return_relaxed */
286#ifndef atomic64_dec_return_relaxed
287#define atomic64_dec_return_relaxed atomic64_dec_return
288#define atomic64_dec_return_acquire atomic64_dec_return
289#define atomic64_dec_return_release atomic64_dec_return
290
291#else /* atomic64_dec_return_relaxed */
292
293#ifndef atomic64_dec_return_acquire
294#define atomic64_dec_return_acquire(...) \
295 __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
296#endif
297
298#ifndef atomic64_dec_return_release
299#define atomic64_dec_return_release(...) \
300 __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
301#endif
302
303#ifndef atomic64_dec_return
304#define atomic64_dec_return(...) \
305 __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
306#endif
307#endif /* atomic64_dec_return_relaxed */
308
212/* atomic64_xchg_relaxed */ 309/* atomic64_xchg_relaxed */
213#ifndef atomic64_xchg_relaxed 310#ifndef atomic64_xchg_relaxed
214#define atomic64_xchg_relaxed atomic64_xchg 311#define atomic64_xchg_relaxed atomic64_xchg
@@ -451,7 +548,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
451} 548}
452#endif 549#endif
453 550
454#include <asm-generic/atomic-long.h>
455#ifdef CONFIG_GENERIC_ATOMIC64 551#ifdef CONFIG_GENERIC_ATOMIC64
456#include <asm-generic/atomic64.h> 552#include <asm-generic/atomic64.h>
457#endif 553#endif
@@ -463,4 +559,6 @@ static inline void atomic64_andnot(long long i, atomic64_t *v)
463} 559}
464#endif 560#endif
465 561
562#include <asm-generic/atomic-long.h>
563
466#endif /* _LINUX_ATOMIC_H */ 564#endif /* _LINUX_ATOMIC_H */
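
Each new block in linux/atomic.h follows one template: if an architecture only provides the fully ordered atomic_{inc,dec}_return(), the _relaxed/_acquire/_release names simply alias it; if it provides a _relaxed form, the stronger variants are synthesized with __atomic_op_acquire/__atomic_op_release/__atomic_op_fence, whose generic fallbacks put a barrier after, before, or around the relaxed operation. A C11 sketch of that derivation (C11 fences stand in here for the kernel's smp_mb__before/after_atomic(); a simplification, not the kernel macros themselves):

#include <stdatomic.h>
#include <stdio.h>

static int inc_return_relaxed(atomic_int *v)
{
        return atomic_fetch_add_explicit(v, 1, memory_order_relaxed) + 1;
}

static int inc_return_acquire(atomic_int *v)
{
        int ret = inc_return_relaxed(v);

        atomic_thread_fence(memory_order_acquire);      /* barrier after, as in __atomic_op_acquire() */
        return ret;
}

static int inc_return_release(atomic_int *v)
{
        atomic_thread_fence(memory_order_release);      /* barrier before, as in __atomic_op_release() */
        return inc_return_relaxed(v);
}

static int inc_return_fence(atomic_int *v)
{
        int ret;

        atomic_thread_fence(memory_order_seq_cst);      /* barriers around, as in __atomic_op_fence() */
        ret = inc_return_relaxed(v);
        atomic_thread_fence(memory_order_seq_cst);
        return ret;
}

int main(void)
{
        atomic_int v;

        atomic_init(&v, 0);
        printf("%d\n", inc_return_acquire(&v));         /* 1 */
        printf("%d\n", inc_return_release(&v));         /* 2 */
        printf("%d\n", inc_return_fence(&v));           /* 3 */
        return 0;
}
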
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index a23209b43842..1b4d69f68c33 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,6 +116,8 @@ struct bdi_writeback {
116 struct list_head work_list; 116 struct list_head work_list;
117 struct delayed_work dwork; /* work item used for writeback */ 117 struct delayed_work dwork; /* work item used for writeback */
118 118
119 struct list_head bdi_node; /* anchored at bdi->wb_list */
120
119#ifdef CONFIG_CGROUP_WRITEBACK 121#ifdef CONFIG_CGROUP_WRITEBACK
120 struct percpu_ref refcnt; /* used only for !root wb's */ 122 struct percpu_ref refcnt; /* used only for !root wb's */
121 struct fprop_local_percpu memcg_completions; 123 struct fprop_local_percpu memcg_completions;
@@ -150,6 +152,7 @@ struct backing_dev_info {
150 atomic_long_t tot_write_bandwidth; 152 atomic_long_t tot_write_bandwidth;
151 153
152 struct bdi_writeback wb; /* the root writeback info for this bdi */ 154 struct bdi_writeback wb; /* the root writeback info for this bdi */
155 struct list_head wb_list; /* list of all wbs */
153#ifdef CONFIG_CGROUP_WRITEBACK 156#ifdef CONFIG_CGROUP_WRITEBACK
154 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ 157 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
155 struct rb_root cgwb_congested_tree; /* their congested states */ 158 struct rb_root cgwb_congested_tree; /* their congested states */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index d5eb4ad1c534..c85f74946a8b 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -19,13 +19,17 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20 20
21int __must_check bdi_init(struct backing_dev_info *bdi); 21int __must_check bdi_init(struct backing_dev_info *bdi);
22void bdi_destroy(struct backing_dev_info *bdi); 22void bdi_exit(struct backing_dev_info *bdi);
23 23
24__printf(3, 4) 24__printf(3, 4)
25int bdi_register(struct backing_dev_info *bdi, struct device *parent, 25int bdi_register(struct backing_dev_info *bdi, struct device *parent,
26 const char *fmt, ...); 26 const char *fmt, ...);
27int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 27int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
28void bdi_unregister(struct backing_dev_info *bdi);
29
28int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); 30int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
31void bdi_destroy(struct backing_dev_info *bdi);
32
29void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, 33void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
30 bool range_cyclic, enum wb_reason reason); 34 bool range_cyclic, enum wb_reason reason);
31void wb_start_background_writeback(struct bdi_writeback *wb); 35void wb_start_background_writeback(struct bdi_writeback *wb);
@@ -408,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
408 rcu_read_unlock(); 412 rcu_read_unlock();
409} 413}
410 414
411struct wb_iter {
412 int start_memcg_id;
413 struct radix_tree_iter tree_iter;
414 void **slot;
415};
416
417static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
418 struct backing_dev_info *bdi)
419{
420 struct radix_tree_iter *titer = &iter->tree_iter;
421
422 WARN_ON_ONCE(!rcu_read_lock_held());
423
424 if (iter->start_memcg_id >= 0) {
425 iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
426 iter->start_memcg_id = -1;
427 } else {
428 iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
429 }
430
431 if (!iter->slot)
432 iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
433 if (iter->slot)
434 return *iter->slot;
435 return NULL;
436}
437
438static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
439 struct backing_dev_info *bdi,
440 int start_memcg_id)
441{
442 iter->start_memcg_id = start_memcg_id;
443
444 if (start_memcg_id)
445 return __wb_iter_next(iter, bdi);
446 else
447 return &bdi->wb;
448}
449
450/**
451 * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
452 * @wb_cur: cursor struct bdi_writeback pointer
453 * @bdi: bdi to walk wb's of
454 * @iter: pointer to struct wb_iter to be used as iteration buffer
455 * @start_memcg_id: memcg ID to start iteration from
456 *
457 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
458 * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
459 * to be used as temp storage during iteration. rcu_read_lock() must be
460 * held throughout iteration.
461 */
462#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \
463 for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \
464 (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
465
466#else /* CONFIG_CGROUP_WRITEBACK */ 415#else /* CONFIG_CGROUP_WRITEBACK */
467 416
468static inline bool inode_cgwb_enabled(struct inode *inode) 417static inline bool inode_cgwb_enabled(struct inode *inode)
@@ -522,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
522{ 471{
523} 472}
524 473
525struct wb_iter {
526 int next_id;
527};
528
529#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
530 for ((iter)->next_id = (start_blkcg_id); \
531 ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
532
533static inline int inode_congested(struct inode *inode, int cong_bits) 474static inline int inode_congested(struct inode *inode, int cong_bits)
534{ 475{
535 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); 476 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 0a5cc7a1109b..c02e669945e9 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
713 713
714 if (!throtl) { 714 if (!throtl) {
715 blkg = blkg ?: q->root_blkg; 715 blkg = blkg ?: q->root_blkg;
716 blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, 716 blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
717 bio->bi_iter.bi_size); 717 bio->bi_iter.bi_size);
718 blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); 718 blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
719 } 719 }
720 720
721 rcu_read_unlock(); 721 rcu_read_unlock();
diff --git a/include/linux/cma.h b/include/linux/cma.h
index f7ef093ec49a..29f9e774ab76 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
26extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, 26extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
27 unsigned int order_per_bit, 27 unsigned int order_per_bit,
28 struct cma **res_cma); 28 struct cma **res_cma);
29extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); 29extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
30extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); 30extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
31#endif 31#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index dfaa7b3e9ae9..8efb40e61d6e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -237,12 +237,25 @@
237#define KASAN_ABI_VERSION 3 237#define KASAN_ABI_VERSION 3
238#endif 238#endif
239 239
240#if GCC_VERSION >= 40902
241/*
242 * Tell the compiler that address safety instrumentation (KASAN)
243 * should not be applied to that function.
244 * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
245 */
246#define __no_sanitize_address __attribute__((no_sanitize_address))
247#endif
248
240#endif /* gcc version >= 40000 specific checks */ 249#endif /* gcc version >= 40000 specific checks */
241 250
242#if !defined(__noclone) 251#if !defined(__noclone)
243#define __noclone /* not needed */ 252#define __noclone /* not needed */
244#endif 253#endif
245 254
255#if !defined(__no_sanitize_address)
256#define __no_sanitize_address
257#endif
258
246/* 259/*
247 * A trick to suppress uninitialized variable warning without generating any 260 * A trick to suppress uninitialized variable warning without generating any
248 * code 261 * code
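For context, the __no_sanitize_address attribute defined above is intended for functions that must not be instrumented by KASAN. A minimal sketch of such an annotation, assuming a hypothetical probe_word() helper that deliberately reads memory the checker would flag (not part of this diff):

#include <linux/compiler.h>

/* Hypothetical helper: performs a read KASAN would normally report, so
 * instrumentation is suppressed for this function only.  Deliberately not
 * marked inline, since the attribute conflicts with inlining. */
static __no_sanitize_address __maybe_unused
unsigned long probe_word(const unsigned long *p)
{
	return *p;
}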
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c836eb2dc44d..fe817432190c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
198 198
199#include <uapi/linux/types.h> 199#include <uapi/linux/types.h>
200 200
201static __always_inline void __read_once_size(const volatile void *p, void *res, int size) 201#define __READ_ONCE_SIZE \
202({ \
203 switch (size) { \
204 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
205 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
206 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
207 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
208 default: \
209 barrier(); \
210 __builtin_memcpy((void *)res, (const void *)p, size); \
211 barrier(); \
212 } \
213})
214
215static __always_inline
216void __read_once_size(const volatile void *p, void *res, int size)
202{ 217{
203 switch (size) { 218 __READ_ONCE_SIZE;
204 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; 219}
205 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; 220
206 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; 221#ifdef CONFIG_KASAN
207 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; 222/*
208 default: 223 * This function is not 'inline' because __no_sanitize_address confilcts
209 barrier(); 224 * with inlining. Attempt to inline it may cause a build failure.
210 __builtin_memcpy((void *)res, (const void *)p, size); 225 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
211 barrier(); 226 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
212 } 227 */
228static __no_sanitize_address __maybe_unused
229void __read_once_size_nocheck(const volatile void *p, void *res, int size)
230{
231 __READ_ONCE_SIZE;
232}
233#else
234static __always_inline
235void __read_once_size_nocheck(const volatile void *p, void *res, int size)
236{
237 __READ_ONCE_SIZE;
213} 238}
239#endif
214 240
215static __always_inline void __write_once_size(volatile void *p, void *res, int size) 241static __always_inline void __write_once_size(volatile void *p, void *res, int size)
216{ 242{
@@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
248 * required ordering. 274 * required ordering.
249 */ 275 */
250 276
251#define READ_ONCE(x) \ 277#define __READ_ONCE(x, check) \
252 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 278({ \
279 union { typeof(x) __val; char __c[1]; } __u; \
280 if (check) \
281 __read_once_size(&(x), __u.__c, sizeof(x)); \
282 else \
283 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
284 __u.__val; \
285})
286#define READ_ONCE(x) __READ_ONCE(x, 1)
287
288/*
289 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
290 * to hide memory access from KASAN.
291 */
292#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
253 293
254#define WRITE_ONCE(x, val) \ 294#define WRITE_ONCE(x, val) \
255({ \ 295({ \
@@ -259,22 +299,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
259 __u.__val; \ 299 __u.__val; \
260}) 300})
261 301
262/**
263 * READ_ONCE_CTRL - Read a value heading a control dependency
264 * @x: The value to be read, heading the control dependency
265 *
266 * Control dependencies are tricky. See Documentation/memory-barriers.txt
267 * for important information on how to use them. Note that in many cases,
268 * use of smp_load_acquire() will be much simpler. Control dependencies
269 * should be avoided except on the hottest of hotpaths.
270 */
271#define READ_ONCE_CTRL(x) \
272({ \
273 typeof(x) __val = READ_ONCE(x); \
274 smp_read_barrier_depends(); /* Enforce control dependency. */ \
275 __val; \
276})
277
278#endif /* __KERNEL__ */ 302#endif /* __KERNEL__ */
279 303
280#endif /* __ASSEMBLY__ */ 304#endif /* __ASSEMBLY__ */
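The READ_ONCE_NOCHECK() flavour added here behaves exactly like READ_ONCE() but routes the load through the uninstrumented reader, so KASAN stays silent for accesses that are intentionally racy or may stray outside the object it tracks (peeking at another task's stack, for example). A brief sketch of the intended split, with hypothetical helper names:

#include <linux/compiler.h>

/* Hypothetical: a load that may legitimately fall outside the object
 * KASAN is tracking -- keep it invisible to the checker. */
static inline unsigned long peek_stack_word(const unsigned long *addr)
{
	return READ_ONCE_NOCHECK(*addr);
}

/* Ordinary lockless reads of shared state keep using READ_ONCE(). */
static inline int read_ready_flag(const int *flag)
{
	return READ_ONCE(*flag);
}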
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 23c30bdcca86..d2ca8c38f9c4 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -228,7 +228,6 @@ extern struct bus_type cpu_subsys;
228extern void cpu_hotplug_begin(void); 228extern void cpu_hotplug_begin(void);
229extern void cpu_hotplug_done(void); 229extern void cpu_hotplug_done(void);
230extern void get_online_cpus(void); 230extern void get_online_cpus(void);
231extern bool try_get_online_cpus(void);
232extern void put_online_cpus(void); 231extern void put_online_cpus(void);
233extern void cpu_hotplug_disable(void); 232extern void cpu_hotplug_disable(void);
234extern void cpu_hotplug_enable(void); 233extern void cpu_hotplug_enable(void);
@@ -246,7 +245,6 @@ int cpu_down(unsigned int cpu);
246static inline void cpu_hotplug_begin(void) {} 245static inline void cpu_hotplug_begin(void) {}
247static inline void cpu_hotplug_done(void) {} 246static inline void cpu_hotplug_done(void) {}
248#define get_online_cpus() do { } while (0) 247#define get_online_cpus() do { } while (0)
249#define try_get_online_cpus() true
250#define put_online_cpus() do { } while (0) 248#define put_online_cpus() do { } while (0)
251#define cpu_hotplug_disable() do { } while (0) 249#define cpu_hotplug_disable() do { } while (0)
252#define cpu_hotplug_enable() do { } while (0) 250#define cpu_hotplug_enable() do { } while (0)
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 569bbd039896..fec734df1524 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
111 return ret; 111 return ret;
112} 112}
113 113
114struct page *dma_alloc_from_contiguous(struct device *dev, int count, 114struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
115 unsigned int order); 115 unsigned int order);
116bool dma_release_from_contiguous(struct device *dev, struct page *pages, 116bool dma_release_from_contiguous(struct device *dev, struct page *pages,
117 int count); 117 int count);
@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
144} 144}
145 145
146static inline 146static inline
147struct page *dma_alloc_from_contiguous(struct device *dev, int count, 147struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
148 unsigned int order) 148 unsigned int order)
149{ 149{
150 return NULL; 150 return NULL;
diff --git a/include/linux/edac.h b/include/linux/edac.h
index da3b72e95db3..b3d87e5822f8 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -769,12 +769,10 @@ struct mem_ctl_info {
769 /* the internal state of this controller instance */ 769 /* the internal state of this controller instance */
770 int op_state; 770 int op_state;
771 771
772#ifdef CONFIG_EDAC_DEBUG
773 struct dentry *debugfs; 772 struct dentry *debugfs;
774 u8 fake_inject_layer[EDAC_MAX_LAYERS]; 773 u8 fake_inject_layer[EDAC_MAX_LAYERS];
775 u32 fake_inject_ue; 774 u32 fake_inject_ue;
776 u16 fake_inject_count; 775 u16 fake_inject_count;
777#endif
778}; 776};
779 777
780/* 778/*
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 85ef051ac6fb..569b5a866bb1 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -99,6 +99,7 @@ typedef struct {
99#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ 99#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
100#define EFI_MEMORY_MORE_RELIABLE \ 100#define EFI_MEMORY_MORE_RELIABLE \
101 ((u64)0x0000000000010000ULL) /* higher reliability */ 101 ((u64)0x0000000000010000ULL) /* higher reliability */
102#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
102#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ 103#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
103#define EFI_MEMORY_DESCRIPTOR_VERSION 1 104#define EFI_MEMORY_DESCRIPTOR_VERSION 1
104 105
@@ -595,6 +596,9 @@ void efi_native_runtime_setup(void);
595#define DEVICE_TREE_GUID \ 596#define DEVICE_TREE_GUID \
596 EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 ) 597 EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 )
597 598
599#define EFI_PROPERTIES_TABLE_GUID \
600 EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 )
601
598typedef struct { 602typedef struct {
599 efi_guid_t guid; 603 efi_guid_t guid;
600 u64 table; 604 u64 table;
@@ -676,7 +680,7 @@ typedef struct {
676} efi_system_table_t; 680} efi_system_table_t;
677 681
678struct efi_memory_map { 682struct efi_memory_map {
679 void *phys_map; 683 phys_addr_t phys_map;
680 void *map; 684 void *map;
681 void *map_end; 685 void *map_end;
682 int nr_map; 686 int nr_map;
@@ -808,6 +812,15 @@ typedef struct _efi_file_io_interface {
808#define EFI_FILE_MODE_WRITE 0x0000000000000002 812#define EFI_FILE_MODE_WRITE 0x0000000000000002
809#define EFI_FILE_MODE_CREATE 0x8000000000000000 813#define EFI_FILE_MODE_CREATE 0x8000000000000000
810 814
815typedef struct {
816 u32 version;
817 u32 length;
818 u64 memory_protection_attribute;
819} efi_properties_table_t;
820
821#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
822#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
823
811#define EFI_INVALID_TABLE_ADDR (~0UL) 824#define EFI_INVALID_TABLE_ADDR (~0UL)
812 825
813/* 826/*
@@ -830,6 +843,7 @@ extern struct efi {
830 unsigned long runtime; /* runtime table */ 843 unsigned long runtime; /* runtime table */
831 unsigned long config_table; /* config tables */ 844 unsigned long config_table; /* config tables */
832 unsigned long esrt; /* ESRT table */ 845 unsigned long esrt; /* ESRT table */
846 unsigned long properties_table; /* properties table */
833 efi_get_time_t *get_time; 847 efi_get_time_t *get_time;
834 efi_set_time_t *set_time; 848 efi_set_time_t *set_time;
835 efi_get_wakeup_time_t *get_wakeup_time; 849 efi_get_wakeup_time_t *get_wakeup_time;
@@ -901,13 +915,19 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
901 struct resource *data_resource, struct resource *bss_resource); 915 struct resource *data_resource, struct resource *bss_resource);
902extern void efi_get_time(struct timespec *now); 916extern void efi_get_time(struct timespec *now);
903extern void efi_reserve_boot_services(void); 917extern void efi_reserve_boot_services(void);
904extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); 918extern int efi_get_fdt_params(struct efi_fdt_params *params);
905extern struct efi_memory_map memmap; 919extern struct efi_memory_map memmap;
906extern struct kobject *efi_kobj; 920extern struct kobject *efi_kobj;
907 921
908extern int efi_reboot_quirk_mode; 922extern int efi_reboot_quirk_mode;
909extern bool efi_poweroff_required(void); 923extern bool efi_poweroff_required(void);
910 924
925#ifdef CONFIG_EFI_FAKE_MEMMAP
926extern void __init efi_fake_memmap(void);
927#else
928static inline void efi_fake_memmap(void) { }
929#endif
930
911/* Iterate through an efi_memory_map */ 931/* Iterate through an efi_memory_map */
912#define for_each_efi_memory_desc(m, md) \ 932#define for_each_efi_memory_desc(m, md) \
913 for ((md) = (m)->map; \ 933 for ((md) = (m)->map; \
@@ -959,6 +979,7 @@ extern int __init efi_setup_pcdp_console(char *);
959#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ 979#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
960#define EFI_ARCH_1 7 /* First arch-specific bit */ 980#define EFI_ARCH_1 7 /* First arch-specific bit */
961#define EFI_DBG 8 /* Print additional debug info at runtime */ 981#define EFI_DBG 8 /* Print additional debug info at runtime */
982#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
962 983
963#ifdef CONFIG_EFI 984#ifdef CONFIG_EFI
964/* 985/*
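A rough sketch of how the new properties-table definitions might be consumed at boot, assuming the table has already been located via EFI_PROPERTIES_TABLE_GUID among the config tables (the parse_properties() helper below is hypothetical, not this series' implementation):

#include <linux/efi.h>

/* Hypothetical consumer: if firmware advertises that PE data is separated
 * from code, remember that runtime data regions may be mapped NX. */
static void parse_properties(const efi_properties_table_t *tbl)
{
	if (tbl->version != EFI_PROPERTIES_TABLE_VERSION)
		return;

	if (tbl->memory_protection_attribute &
	    EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
		set_bit(EFI_NX_PE_DATA, &efi.flags);
}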
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 674e3e226465..5295535b60c6 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -26,6 +26,7 @@ struct fdtable {
26 struct file __rcu **fd; /* current fd array */ 26 struct file __rcu **fd; /* current fd array */
27 unsigned long *close_on_exec; 27 unsigned long *close_on_exec;
28 unsigned long *open_fds; 28 unsigned long *open_fds;
29 unsigned long *full_fds_bits;
29 struct rcu_head rcu; 30 struct rcu_head rcu;
30}; 31};
31 32
@@ -59,6 +60,7 @@ struct files_struct {
59 int next_fd; 60 int next_fd;
60 unsigned long close_on_exec_init[1]; 61 unsigned long close_on_exec_init[1];
61 unsigned long open_fds_init[1]; 62 unsigned long open_fds_init[1];
63 unsigned long full_fds_bits_init[1];
62 struct file __rcu * fd_array[NR_OPEN_DEFAULT]; 64 struct file __rcu * fd_array[NR_OPEN_DEFAULT];
63}; 65};
64 66
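The new full_fds_bits bitmap keeps one bit per word of open_fds, set once that word has no free descriptor left, so the allocator can skip entirely-full words instead of scanning them bit by bit. A simplified illustration of the idea (not the in-tree search routine):

#include <linux/bitops.h>
#include <linux/fdtable.h>

/* Simplified two-level search: find a word that still has a free slot,
 * then the free bit inside it. */
static unsigned int find_free_fd_sketch(const struct fdtable *fdt)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int word = find_next_zero_bit(fdt->full_fds_bits,
					       maxfd / BITS_PER_LONG, 0);

	return find_next_zero_bit(fdt->open_fds, maxfd,
				  word * BITS_PER_LONG);
}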
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 0408545bce42..37ec668546ab 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -17,6 +17,7 @@ enum fwnode_type {
17 FWNODE_OF, 17 FWNODE_OF,
18 FWNODE_ACPI, 18 FWNODE_ACPI,
19 FWNODE_PDATA, 19 FWNODE_PDATA,
20 FWNODE_IRQCHIP,
20}; 21};
21 22
22struct fwnode_handle { 23struct fwnode_handle {
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 14cac67c2012..fb0fde686cb1 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -400,6 +400,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
400{ 400{
401 return ERR_PTR(-EINVAL); 401 return ERR_PTR(-EINVAL);
402} 402}
403
403static inline int desc_to_gpio(const struct gpio_desc *desc) 404static inline int desc_to_gpio(const struct gpio_desc *desc)
404{ 405{
405 /* GPIO can never have been requested */ 406 /* GPIO can never have been requested */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 1aed31c5ffba..d1baebf350d8 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -206,6 +206,9 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
206 206
207#endif /* CONFIG_GPIOLIB_IRQCHIP */ 207#endif /* CONFIG_GPIOLIB_IRQCHIP */
208 208
209int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
210void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
211
209#ifdef CONFIG_PINCTRL 212#ifdef CONFIG_PINCTRL
210 213
211/** 214/**
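The two helpers declared above let pinctrl-backed GPIO drivers stop open-coding pinctrl_request_gpio()/pinctrl_free_gpio() in their request/free hooks. A sketch of the expected wiring (the chip name and other fields are illustrative):

#include <linux/gpio/driver.h>

/* Illustrative chip definition: delegate request/free to the generic
 * pinctrl-aware helpers. */
static struct gpio_chip example_gpio_chip = {
	.label   = "example-gpio",
	.request = gpiochip_generic_request,
	.free    = gpiochip_generic_free,
};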
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e38681f4912d..810a34f60424 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -59,7 +59,8 @@ extern struct fs_struct init_fs;
59 .rlim = INIT_RLIMITS, \ 59 .rlim = INIT_RLIMITS, \
60 .cputimer = { \ 60 .cputimer = { \
61 .cputime_atomic = INIT_CPUTIME_ATOMIC, \ 61 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
62 .running = 0, \ 62 .running = false, \
63 .checking_timer = false, \
63 }, \ 64 }, \
64 INIT_PREV_CPUTIME(sig) \ 65 INIT_PREV_CPUTIME(sig) \
65 .cred_guard_mutex = \ 66 .cred_guard_mutex = \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index be7e75c945e9..ad16809c8596 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
102 * @flags: flags (see IRQF_* above) 102 * @flags: flags (see IRQF_* above)
103 * @thread_fn: interrupt handler function for threaded interrupts 103 * @thread_fn: interrupt handler function for threaded interrupts
104 * @thread: thread pointer for threaded interrupts 104 * @thread: thread pointer for threaded interrupts
105 * @secondary: pointer to secondary irqaction (force threading)
105 * @thread_flags: flags related to @thread 106 * @thread_flags: flags related to @thread
106 * @thread_mask: bitmask for keeping track of @thread activity 107 * @thread_mask: bitmask for keeping track of @thread activity
107 * @dir: pointer to the proc/irq/NN/name entry 108 * @dir: pointer to the proc/irq/NN/name entry
@@ -113,6 +114,7 @@ struct irqaction {
113 struct irqaction *next; 114 struct irqaction *next;
114 irq_handler_t thread_fn; 115 irq_handler_t thread_fn;
115 struct task_struct *thread; 116 struct task_struct *thread;
117 struct irqaction *secondary;
116 unsigned int irq; 118 unsigned int irq;
117 unsigned int flags; 119 unsigned int flags;
118 unsigned long thread_flags; 120 unsigned long thread_flags;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 11bf09288ddb..3c1c96786248 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -67,11 +67,12 @@ enum irqchip_irq_state;
67 * request/setup_irq() 67 * request/setup_irq()
68 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) 68 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
69 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context 69 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
70 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread 70 * IRQ_NESTED_THREAD - Interrupt nests into another thread
71 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable 71 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
72 * IRQ_IS_POLLED - Always polled by another interrupt. Exclude 72 * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
73 * it from the spurious interrupt detection 73 * it from the spurious interrupt detection
74 * mechanism and from core side polling. 74 * mechanism and from core side polling.
75 * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
75 */ 76 */
76enum { 77enum {
77 IRQ_TYPE_NONE = 0x00000000, 78 IRQ_TYPE_NONE = 0x00000000,
@@ -97,13 +98,14 @@ enum {
97 IRQ_NOTHREAD = (1 << 16), 98 IRQ_NOTHREAD = (1 << 16),
98 IRQ_PER_CPU_DEVID = (1 << 17), 99 IRQ_PER_CPU_DEVID = (1 << 17),
99 IRQ_IS_POLLED = (1 << 18), 100 IRQ_IS_POLLED = (1 << 18),
101 IRQ_DISABLE_UNLAZY = (1 << 19),
100}; 102};
101 103
102#define IRQF_MODIFY_MASK \ 104#define IRQF_MODIFY_MASK \
103 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 105 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
104 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ 106 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
105 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ 107 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
106 IRQ_IS_POLLED) 108 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
107 109
108#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) 110#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
109 111
@@ -297,21 +299,6 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
297 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; 299 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
298} 300}
299 301
300/*
301 * Functions for chained handlers which can be enabled/disabled by the
302 * standard disable_irq/enable_irq calls. Must be called with
303 * irq_desc->lock held.
304 */
305static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
306{
307 __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
308}
309
310static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
311{
312 __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
313}
314
315static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 302static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
316{ 303{
317 return d->hwirq; 304 return d->hwirq;
@@ -452,6 +439,8 @@ extern int irq_set_affinity_locked(struct irq_data *data,
452 const struct cpumask *cpumask, bool force); 439 const struct cpumask *cpumask, bool force);
453extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); 440extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
454 441
442extern void irq_migrate_all_off_this_cpu(void);
443
455#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 444#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
456void irq_move_irq(struct irq_data *data); 445void irq_move_irq(struct irq_data *data);
457void irq_move_masked_irq(struct irq_data *data); 446void irq_move_masked_irq(struct irq_data *data);
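IRQ_DISABLE_UNLAZY opts an interrupt line out of the lazy-disable optimization, so disable_irq() masks it at the chip immediately rather than waiting for the next (spurious) delivery. A sketch of how a driver whose device cannot tolerate a late interrupt might use it (function name is illustrative):

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Illustrative: make disable_irq() take effect at the irqchip right away. */
static void example_quiesce_irq(unsigned int irq)
{
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	disable_irq(irq);
}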
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 9eeeb9589acf..c9ae0c6ec050 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -18,8 +18,6 @@
18#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H 18#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
19#define __LINUX_IRQCHIP_ARM_GIC_V3_H 19#define __LINUX_IRQCHIP_ARM_GIC_V3_H
20 20
21#include <asm/sysreg.h>
22
23/* 21/*
24 * Distributor registers. We assume we're running non-secure, with ARE 22 * Distributor registers. We assume we're running non-secure, with ARE
25 * being set. Secure-only and non-ARE registers are not described. 23 * being set. Secure-only and non-ARE registers are not described.
@@ -231,6 +229,7 @@
231#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) 229#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
232#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) 230#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
233#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) 231#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
232#define GITS_BASER_PAGES_MAX 256
234 233
235#define GITS_BASER_TYPE_NONE 0 234#define GITS_BASER_TYPE_NONE 0
236#define GITS_BASER_TYPE_DEVICE 1 235#define GITS_BASER_TYPE_DEVICE 1
@@ -266,16 +265,16 @@
266/* 265/*
267 * Hypervisor interface registers (SRE only) 266 * Hypervisor interface registers (SRE only)
268 */ 267 */
269#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) 268#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
270 269
271#define ICH_LR_EOI (1UL << 41) 270#define ICH_LR_EOI (1ULL << 41)
272#define ICH_LR_GROUP (1UL << 60) 271#define ICH_LR_GROUP (1ULL << 60)
273#define ICH_LR_HW (1UL << 61) 272#define ICH_LR_HW (1ULL << 61)
274#define ICH_LR_STATE (3UL << 62) 273#define ICH_LR_STATE (3ULL << 62)
275#define ICH_LR_PENDING_BIT (1UL << 62) 274#define ICH_LR_PENDING_BIT (1ULL << 62)
276#define ICH_LR_ACTIVE_BIT (1UL << 63) 275#define ICH_LR_ACTIVE_BIT (1ULL << 63)
277#define ICH_LR_PHYS_ID_SHIFT 32 276#define ICH_LR_PHYS_ID_SHIFT 32
278#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT) 277#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
279 278
280#define ICH_MISR_EOI (1 << 0) 279#define ICH_MISR_EOI (1 << 0)
281#define ICH_MISR_U (1 << 1) 280#define ICH_MISR_U (1 << 1)
@@ -292,19 +291,8 @@
292#define ICH_VMCR_PMR_SHIFT 24 291#define ICH_VMCR_PMR_SHIFT 24
293#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) 292#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
294 293
295#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
296#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
297#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
298#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
299#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
300#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
301#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
302#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
303
304#define ICC_IAR1_EL1_SPURIOUS 0x3ff 294#define ICC_IAR1_EL1_SPURIOUS 0x3ff
305 295
306#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
307
308#define ICC_SRE_EL2_SRE (1 << 0) 296#define ICC_SRE_EL2_SRE (1 << 0)
309#define ICC_SRE_EL2_ENABLE (1 << 3) 297#define ICC_SRE_EL2_ENABLE (1 << 3)
310 298
@@ -320,54 +308,10 @@
320#define ICC_SGI1R_AFFINITY_3_SHIFT 48 308#define ICC_SGI1R_AFFINITY_3_SHIFT 48
321#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) 309#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
322 310
323/* 311#include <asm/arch_gicv3.h>
324 * System register definitions
325 */
326#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
327#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
328#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
329#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
330#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
331#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
332#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
333
334#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
335#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
336
337#define ICH_LR0_EL2 __LR0_EL2(0)
338#define ICH_LR1_EL2 __LR0_EL2(1)
339#define ICH_LR2_EL2 __LR0_EL2(2)
340#define ICH_LR3_EL2 __LR0_EL2(3)
341#define ICH_LR4_EL2 __LR0_EL2(4)
342#define ICH_LR5_EL2 __LR0_EL2(5)
343#define ICH_LR6_EL2 __LR0_EL2(6)
344#define ICH_LR7_EL2 __LR0_EL2(7)
345#define ICH_LR8_EL2 __LR8_EL2(0)
346#define ICH_LR9_EL2 __LR8_EL2(1)
347#define ICH_LR10_EL2 __LR8_EL2(2)
348#define ICH_LR11_EL2 __LR8_EL2(3)
349#define ICH_LR12_EL2 __LR8_EL2(4)
350#define ICH_LR13_EL2 __LR8_EL2(5)
351#define ICH_LR14_EL2 __LR8_EL2(6)
352#define ICH_LR15_EL2 __LR8_EL2(7)
353
354#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
355#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
356#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
357#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
358#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
359
360#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
361#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
362#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
363#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
364#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
365 312
366#ifndef __ASSEMBLY__ 313#ifndef __ASSEMBLY__
367 314
368#include <linux/stringify.h>
369#include <asm/msi.h>
370
371/* 315/*
372 * We need a value to serve as a irq-type for LPIs. Choose one that will 316 * We need a value to serve as a irq-type for LPIs. Choose one that will
373 * hopefully pique the interest of the reviewer. 317 * hopefully pique the interest of the reviewer.
@@ -385,23 +329,26 @@ struct rdists {
385 u64 flags; 329 u64 flags;
386}; 330};
387 331
388static inline void gic_write_eoir(u64 irq)
389{
390 asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
391 isb();
392}
393
394static inline void gic_write_dir(u64 irq)
395{
396 asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
397 isb();
398}
399
400struct irq_domain; 332struct irq_domain;
401int its_cpu_init(void); 333int its_cpu_init(void);
402int its_init(struct device_node *node, struct rdists *rdists, 334int its_init(struct device_node *node, struct rdists *rdists,
403 struct irq_domain *domain); 335 struct irq_domain *domain);
404 336
337static inline bool gic_enable_sre(void)
338{
339 u32 val;
340
341 val = gic_read_sre();
342 if (val & ICC_SRE_EL1_SRE)
343 return true;
344
345 val |= ICC_SRE_EL1_SRE;
346 gic_write_sre(val);
347 val = gic_read_sre();
348
349 return !!(val & ICC_SRE_EL1_SRE);
350}
351
405#endif 352#endif
406 353
407#endif 354#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index b8901dfd9e95..bae69e5d693c 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -100,16 +100,11 @@
100 100
101struct device_node; 101struct device_node;
102 102
103void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
104 u32 offset, struct device_node *);
105void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 103void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
106int gic_cpu_if_down(unsigned int gic_nr); 104int gic_cpu_if_down(unsigned int gic_nr);
107 105
108static inline void gic_init(unsigned int nr, int start, 106void gic_init(unsigned int nr, int start,
109 void __iomem *dist , void __iomem *cpu) 107 void __iomem *dist , void __iomem *cpu);
110{
111 gic_init_bases(nr, start, dist, cpu, 0, NULL);
112}
113 108
114int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); 109int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
115 110
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index f644fdb06dd6..d5e5c5bef28c 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -5,9 +5,10 @@
5 * helpful for interrupt controllers to implement mapping between hardware 5 * helpful for interrupt controllers to implement mapping between hardware
6 * irq numbers and the Linux irq number space. 6 * irq numbers and the Linux irq number space.
7 * 7 *
8 * irq_domains also have a hook for translating device tree interrupt 8 * irq_domains also have hooks for translating device tree or other
9 * representation into a hardware irq number that can be mapped back to a 9 * firmware interrupt representations into a hardware irq number that
10 * Linux irq number without any extra platform support code. 10 * can be mapped back to a Linux irq number without any extra platform
11 * support code.
11 * 12 *
12 * Interrupt controller "domain" data structure. This could be defined as a 13 * Interrupt controller "domain" data structure. This could be defined as a
13 * irq domain controller. That is, it handles the mapping between hardware 14 * irq domain controller. That is, it handles the mapping between hardware
@@ -17,16 +18,12 @@
17 * model). It's the domain callbacks that are responsible for setting the 18 * model). It's the domain callbacks that are responsible for setting the
18 * irq_chip on a given irq_desc after it's been mapped. 19 * irq_chip on a given irq_desc after it's been mapped.
19 * 20 *
20 * The host code and data structures are agnostic to whether or not 21 * The host code and data structures use a fwnode_handle pointer to
21 * we use an open firmware device-tree. We do have references to struct 22 * identify the domain. In some cases, and in order to preserve source
22 * device_node in two places: in irq_find_host() to find the host matching 23 * code compatibility, this fwnode pointer is "upgraded" to a DT
23 * a given interrupt controller node, and of course as an argument to its 24 * device_node. For those firmware infrastructures that do not provide
24 * counterpart domain->ops->match() callback. However, those are treated as 25 * a unique identifier for an interrupt controller, the irq_domain
25 * generic pointers by the core and the fact that it's actually a device-node 26 * code offers a fwnode allocator.
26 * pointer is purely a convention between callers and implementation. This
27 * code could thus be used on other architectures by replacing those two
28 * by some sort of arch-specific void * "token" used to identify interrupt
29 * controllers.
30 */ 27 */
31 28
32#ifndef _LINUX_IRQDOMAIN_H 29#ifndef _LINUX_IRQDOMAIN_H
@@ -34,6 +31,7 @@
34 31
35#include <linux/types.h> 32#include <linux/types.h>
36#include <linux/irqhandler.h> 33#include <linux/irqhandler.h>
34#include <linux/of.h>
37#include <linux/radix-tree.h> 35#include <linux/radix-tree.h>
38 36
39struct device_node; 37struct device_node;
@@ -45,6 +43,24 @@ struct irq_data;
45/* Number of irqs reserved for a legacy isa controller */ 43/* Number of irqs reserved for a legacy isa controller */
46#define NUM_ISA_INTERRUPTS 16 44#define NUM_ISA_INTERRUPTS 16
47 45
46#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
47
48/**
49 * struct irq_fwspec - generic IRQ specifier structure
50 *
51 * @fwnode: Pointer to a firmware-specific descriptor
52 * @param_count: Number of device-specific parameters
53 * @param: Device-specific parameters
54 *
55 * This structure, directly modeled after of_phandle_args, is used to
56 * pass a device-specific description of an interrupt.
57 */
58struct irq_fwspec {
59 struct fwnode_handle *fwnode;
60 int param_count;
61 u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
62};
63
48/* 64/*
49 * Should several domains have the same device node, but serve 65 * Should several domains have the same device node, but serve
50 * different purposes (for example one domain is for PCI/MSI, and the 66 * different purposes (for example one domain is for PCI/MSI, and the
@@ -91,6 +107,8 @@ struct irq_domain_ops {
91 unsigned int nr_irqs); 107 unsigned int nr_irqs);
92 void (*activate)(struct irq_domain *d, struct irq_data *irq_data); 108 void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
93 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); 109 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
110 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
111 unsigned long *out_hwirq, unsigned int *out_type);
94#endif 112#endif
95}; 113};
96 114
@@ -130,7 +148,7 @@ struct irq_domain {
130 unsigned int flags; 148 unsigned int flags;
131 149
132 /* Optional data */ 150 /* Optional data */
133 struct device_node *of_node; 151 struct fwnode_handle *fwnode;
134 enum irq_domain_bus_token bus_token; 152 enum irq_domain_bus_token bus_token;
135 struct irq_domain_chip_generic *gc; 153 struct irq_domain_chip_generic *gc;
136#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 154#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -163,11 +181,13 @@ enum {
163 181
164static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) 182static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
165{ 183{
166 return d->of_node; 184 return to_of_node(d->fwnode);
167} 185}
168 186
169#ifdef CONFIG_IRQ_DOMAIN 187#ifdef CONFIG_IRQ_DOMAIN
170struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 188struct fwnode_handle *irq_domain_alloc_fwnode(void *data);
189void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
190struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
171 irq_hw_number_t hwirq_max, int direct_max, 191 irq_hw_number_t hwirq_max, int direct_max,
172 const struct irq_domain_ops *ops, 192 const struct irq_domain_ops *ops,
173 void *host_data); 193 void *host_data);
@@ -182,10 +202,21 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
182 irq_hw_number_t first_hwirq, 202 irq_hw_number_t first_hwirq,
183 const struct irq_domain_ops *ops, 203 const struct irq_domain_ops *ops,
184 void *host_data); 204 void *host_data);
185extern struct irq_domain *irq_find_matching_host(struct device_node *node, 205extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
186 enum irq_domain_bus_token bus_token); 206 enum irq_domain_bus_token bus_token);
187extern void irq_set_default_host(struct irq_domain *host); 207extern void irq_set_default_host(struct irq_domain *host);
188 208
209static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
210{
211 return node ? &node->fwnode : NULL;
212}
213
214static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
215 enum irq_domain_bus_token bus_token)
216{
217 return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
218}
219
189static inline struct irq_domain *irq_find_host(struct device_node *node) 220static inline struct irq_domain *irq_find_host(struct device_node *node)
190{ 221{
191 return irq_find_matching_host(node, DOMAIN_BUS_ANY); 222 return irq_find_matching_host(node, DOMAIN_BUS_ANY);
@@ -203,14 +234,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
203 const struct irq_domain_ops *ops, 234 const struct irq_domain_ops *ops,
204 void *host_data) 235 void *host_data)
205{ 236{
206 return __irq_domain_add(of_node, size, size, 0, ops, host_data); 237 return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
207} 238}
208static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, 239static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
209 unsigned int max_irq, 240 unsigned int max_irq,
210 const struct irq_domain_ops *ops, 241 const struct irq_domain_ops *ops,
211 void *host_data) 242 void *host_data)
212{ 243{
213 return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data); 244 return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
214} 245}
215static inline struct irq_domain *irq_domain_add_legacy_isa( 246static inline struct irq_domain *irq_domain_add_legacy_isa(
216 struct device_node *of_node, 247 struct device_node *of_node,
@@ -224,7 +255,22 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node
224 const struct irq_domain_ops *ops, 255 const struct irq_domain_ops *ops,
225 void *host_data) 256 void *host_data)
226{ 257{
227 return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data); 258 return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data);
259}
260
261static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
262 unsigned int size,
263 const struct irq_domain_ops *ops,
264 void *host_data)
265{
266 return __irq_domain_add(fwnode, size, size, 0, ops, host_data);
267}
268
269static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
270 const struct irq_domain_ops *ops,
271 void *host_data)
272{
273 return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data);
228} 274}
229 275
230extern void irq_domain_remove(struct irq_domain *host); 276extern void irq_domain_remove(struct irq_domain *host);
@@ -239,6 +285,7 @@ extern void irq_domain_disassociate(struct irq_domain *domain,
239 285
240extern unsigned int irq_create_mapping(struct irq_domain *host, 286extern unsigned int irq_create_mapping(struct irq_domain *host,
241 irq_hw_number_t hwirq); 287 irq_hw_number_t hwirq);
288extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
242extern void irq_dispose_mapping(unsigned int virq); 289extern void irq_dispose_mapping(unsigned int virq);
243 290
244/** 291/**
@@ -290,10 +337,23 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
290 void *chip_data, irq_flow_handler_t handler, 337 void *chip_data, irq_flow_handler_t handler,
291 void *handler_data, const char *handler_name); 338 void *handler_data, const char *handler_name);
292#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 339#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
293extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, 340extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
294 unsigned int flags, unsigned int size, 341 unsigned int flags, unsigned int size,
295 struct device_node *node, 342 struct fwnode_handle *fwnode,
296 const struct irq_domain_ops *ops, void *host_data); 343 const struct irq_domain_ops *ops, void *host_data);
344
345static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
346 unsigned int flags,
347 unsigned int size,
348 struct device_node *node,
349 const struct irq_domain_ops *ops,
350 void *host_data)
351{
352 return irq_domain_create_hierarchy(parent, flags, size,
353 of_node_to_fwnode(node),
354 ops, host_data);
355}
356
297extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, 357extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
298 unsigned int nr_irqs, int node, void *arg, 358 unsigned int nr_irqs, int node, void *arg,
299 bool realloc); 359 bool realloc);
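The irq_fwspec introduced above is the generic, firmware-agnostic replacement for of_phandle_args when handing an interrupt description to a domain. A minimal sketch of creating a mapping through it, assuming a two-cell GIC-style encoding with made-up values (helper name illustrative):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Illustrative: describe hwirq 29, level-high, against a domain's fwnode
 * and ask the core to create (or look up) the Linux irq mapping. */
static unsigned int example_map_irq(struct irq_domain *domain)
{
	struct irq_fwspec fwspec = {
		.fwnode      = domain->fwnode,
		.param_count = 2,
		.param       = { 29, IRQ_TYPE_LEVEL_HIGH },
	};

	return irq_create_fwspec_mapping(&fwspec);
}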
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index e374e369fb2f..eb1bdcf95f2e 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -3,7 +3,7 @@
3 3
4/** 4/**
5 * enum irqreturn 5 * enum irqreturn
6 * @IRQ_NONE interrupt was not from this device 6 * @IRQ_NONE interrupt was not from this device or was not handled
7 * @IRQ_HANDLED interrupt was handled by this device 7 * @IRQ_HANDLED interrupt was handled by this device
8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread 8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread
9 */ 9 */
diff --git a/include/linux/list.h b/include/linux/list.h
index 3e3e64a61002..993395a2e55c 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -87,7 +87,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
87static inline void __list_del(struct list_head * prev, struct list_head * next) 87static inline void __list_del(struct list_head * prev, struct list_head * next)
88{ 88{
89 next->prev = prev; 89 next->prev = prev;
90 prev->next = next; 90 WRITE_ONCE(prev->next, next);
91} 91}
92 92
93/** 93/**
@@ -615,7 +615,8 @@ static inline void __hlist_del(struct hlist_node *n)
615{ 615{
616 struct hlist_node *next = n->next; 616 struct hlist_node *next = n->next;
617 struct hlist_node **pprev = n->pprev; 617 struct hlist_node **pprev = n->pprev;
618 *pprev = next; 618
619 WRITE_ONCE(*pprev, next);
619 if (next) 620 if (next)
620 next->pprev = pprev; 621 next->pprev = pprev;
621} 622}
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 2eb88556c5c5..8132214e8efd 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -93,9 +93,10 @@ static inline void __hlist_bl_del(struct hlist_bl_node *n)
93 LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); 93 LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
94 94
95 /* pprev may be `first`, so be careful not to lose the lock bit */ 95 /* pprev may be `first`, so be careful not to lose the lock bit */
96 *pprev = (struct hlist_bl_node *) 96 WRITE_ONCE(*pprev,
97 (struct hlist_bl_node *)
97 ((unsigned long)next | 98 ((unsigned long)next |
98 ((unsigned long)*pprev & LIST_BL_LOCKMASK)); 99 ((unsigned long)*pprev & LIST_BL_LOCKMASK)));
99 if (next) 100 if (next)
100 next->pprev = pprev; 101 next->pprev = pprev;
101} 102}
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index f266661d2666..444d2b1313bd 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -76,7 +76,8 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
76{ 76{
77 struct hlist_nulls_node *next = n->next; 77 struct hlist_nulls_node *next = n->next;
78 struct hlist_nulls_node **pprev = n->pprev; 78 struct hlist_nulls_node **pprev = n->pprev;
79 *pprev = next; 79
80 WRITE_ONCE(*pprev, next);
80 if (!is_a_nulls(next)) 81 if (!is_a_nulls(next))
81 next->pprev = pprev; 82 next->pprev = pprev;
82} 83}
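The switch to WRITE_ONCE() in these three unlink helpers (list, list_bl, list_nulls) is for the benefit of lockless readers: it prevents the compiler from tearing or re-issuing the pointer store that such a reader observes with READ_ONCE() or rcu_dereference(). A minimal sketch of the reader side this pairs with (assumed usage, not part of the diff):

#include <linux/compiler.h>
#include <linux/list.h>

/* Illustrative lockless check: a single, untorn load of head->next is
 * enough to decide emptiness without taking the list lock. */
static inline bool example_list_nonempty(const struct list_head *head)
{
	return READ_ONCE(head->next) != head;
}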
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6452ff4c463f..3e3318ddfc0e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -676,8 +676,9 @@ enum {
676 676
677struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); 677struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
678struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); 678struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
679void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, 679void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
680 unsigned long *pdirty, unsigned long *pwriteback); 680 unsigned long *pheadroom, unsigned long *pdirty,
681 unsigned long *pwriteback);
681 682
682#else /* CONFIG_CGROUP_WRITEBACK */ 683#else /* CONFIG_CGROUP_WRITEBACK */
683 684
@@ -687,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
687} 688}
688 689
689static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, 690static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
690 unsigned long *pavail, 691 unsigned long *pfilepages,
692 unsigned long *pheadroom,
691 unsigned long *pdirty, 693 unsigned long *pdirty,
692 unsigned long *pwriteback) 694 unsigned long *pwriteback)
693{ 695{
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index fdd0779ccdfa..eb0151bac50c 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -269,7 +269,6 @@ struct mmc_card {
269 /* for byte mode */ 269 /* for byte mode */
270#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ 270#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
271 /* (missing CIA registers) */ 271 /* (missing CIA registers) */
272#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
273#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ 272#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */
274#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ 273#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */
275#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ 274#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 258daf914c6d..37967b6da03c 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -152,10 +152,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
152extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, 152extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
153 struct mmc_command *, int); 153 struct mmc_command *, int);
154extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); 154extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
155extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
156 bool, bool);
157extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); 155extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
158extern int mmc_send_tuning(struct mmc_host *host); 156extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
159extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); 157extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
160 158
161#define MMC_ERASE_ARG 0x00000000 159#define MMC_ERASE_ARG 0x00000000
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 134c57422740..f67b2ec18e6d 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -16,6 +16,7 @@
16 16
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/mmc/core.h> 18#include <linux/mmc/core.h>
19#include <linux/dmaengine.h>
19 20
20#define MAX_MCI_SLOTS 2 21#define MAX_MCI_SLOTS 2
21 22
@@ -40,6 +41,17 @@ enum {
40 41
41struct mmc_data; 42struct mmc_data;
42 43
44enum {
45 TRANS_MODE_PIO = 0,
46 TRANS_MODE_IDMAC,
47 TRANS_MODE_EDMAC
48};
49
50struct dw_mci_dma_slave {
51 struct dma_chan *ch;
52 enum dma_transfer_direction direction;
53};
54
43/** 55/**
44 * struct dw_mci - MMC controller state shared between all slots 56 * struct dw_mci - MMC controller state shared between all slots
45 * @lock: Spinlock protecting the queue and associated data. 57 * @lock: Spinlock protecting the queue and associated data.
@@ -154,7 +166,14 @@ struct dw_mci {
154 dma_addr_t sg_dma; 166 dma_addr_t sg_dma;
155 void *sg_cpu; 167 void *sg_cpu;
156 const struct dw_mci_dma_ops *dma_ops; 168 const struct dw_mci_dma_ops *dma_ops;
169 /* For idmac */
157 unsigned int ring_size; 170 unsigned int ring_size;
171
172 /* For edmac */
173 struct dw_mci_dma_slave *dms;
 174	/* Registers' physical base address */
175 void *phy_regs;
176
158 u32 cmd_status; 177 u32 cmd_status;
159 u32 data_status; 178 u32 data_status;
160 u32 stop_cmdr; 179 u32 stop_cmdr;
@@ -208,8 +227,8 @@ struct dw_mci {
208struct dw_mci_dma_ops { 227struct dw_mci_dma_ops {
209 /* DMA Ops */ 228 /* DMA Ops */
210 int (*init)(struct dw_mci *host); 229 int (*init)(struct dw_mci *host);
211 void (*start)(struct dw_mci *host, unsigned int sg_len); 230 int (*start)(struct dw_mci *host, unsigned int sg_len);
212 void (*complete)(struct dw_mci *host); 231 void (*complete)(void *host);
213 void (*stop)(struct dw_mci *host); 232 void (*stop)(struct dw_mci *host);
214 void (*cleanup)(struct dw_mci *host); 233 void (*cleanup)(struct dw_mci *host);
215 void (*exit)(struct dw_mci *host); 234 void (*exit)(struct dw_mci *host);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 83b81fd865f3..8673ffe3d86e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -292,18 +292,6 @@ struct mmc_host {
292 292
293 mmc_pm_flag_t pm_caps; /* supported pm features */ 293 mmc_pm_flag_t pm_caps; /* supported pm features */
294 294
295#ifdef CONFIG_MMC_CLKGATE
296 int clk_requests; /* internal reference counter */
297 unsigned int clk_delay; /* number of MCI clk hold cycles */
298 bool clk_gated; /* clock gated */
299 struct delayed_work clk_gate_work; /* delayed clock gate */
300 unsigned int clk_old; /* old clock value cache */
301 spinlock_t clk_lock; /* lock for clk fields */
302 struct mutex clk_gate_mutex; /* mutex for clock gating */
303 struct device_attribute clkgate_delay_attr;
304 unsigned long clkgate_delay;
305#endif
306
307 /* host specific block data */ 295 /* host specific block data */
308 unsigned int max_seg_size; /* see blk_queue_max_segment_size */ 296 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
309 unsigned short max_segs; /* see blk_queue_max_segments */ 297 unsigned short max_segs; /* see blk_queue_max_segments */
@@ -423,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply);
423int mmc_regulator_set_ocr(struct mmc_host *mmc, 411int mmc_regulator_set_ocr(struct mmc_host *mmc,
424 struct regulator *supply, 412 struct regulator *supply,
425 unsigned short vdd_bit); 413 unsigned short vdd_bit);
414int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
426#else 415#else
427static inline int mmc_regulator_get_ocrmask(struct regulator *supply) 416static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
428{ 417{
@@ -435,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
435{ 424{
436 return 0; 425 return 0;
437} 426}
427
428static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
429 struct mmc_ios *ios)
430{
431 return -EINVAL;
432}
438#endif 433#endif
439 434
440int mmc_regulator_get_supply(struct mmc_host *mmc); 435int mmc_regulator_get_supply(struct mmc_host *mmc);
@@ -479,26 +474,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host)
479 return host->caps2 & MMC_CAP2_PACKED_WR; 474 return host->caps2 & MMC_CAP2_PACKED_WR;
480} 475}
481 476
482#ifdef CONFIG_MMC_CLKGATE
483void mmc_host_clk_hold(struct mmc_host *host);
484void mmc_host_clk_release(struct mmc_host *host);
485unsigned int mmc_host_clk_rate(struct mmc_host *host);
486
487#else
488static inline void mmc_host_clk_hold(struct mmc_host *host)
489{
490}
491
492static inline void mmc_host_clk_release(struct mmc_host *host)
493{
494}
495
496static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
497{
498 return host->ios.clock;
499}
500#endif
501
502static inline int mmc_card_hs(struct mmc_card *card) 477static inline int mmc_card_hs(struct mmc_card *card)
503{ 478{
504 return card->host->ios.timing == MMC_TIMING_SD_HS || 479 return card->host->ios.timing == MMC_TIMING_SD_HS ||
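mmc_regulator_set_vqmmc() gives host drivers a shared way of moving the I/O rail to whatever ios->signal_voltage requests. A sketch of a driver's voltage-switch hook built on it (driver name and error handling are illustrative):

#include <linux/err.h>
#include <linux/mmc/host.h>

/* Illustrative .start_signal_voltage_switch implementation: let the core
 * helper pick the VQMMC voltage matching the requested signalling level. */
static int example_start_signal_voltage_switch(struct mmc_host *mmc,
					       struct mmc_ios *ios)
{
	if (IS_ERR(mmc->supply.vqmmc))
		return 0;	/* no switchable regulator wired up */

	return mmc_regulator_set_vqmmc(mmc, ios);
}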
diff --git a/include/linux/msi.h b/include/linux/msi.h
index ad939d0ba816..0b4460374020 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -174,6 +174,7 @@ struct msi_controller {
174struct irq_domain; 174struct irq_domain;
175struct irq_chip; 175struct irq_chip;
176struct device_node; 176struct device_node;
177struct fwnode_handle;
177struct msi_domain_info; 178struct msi_domain_info;
178 179
179/** 180/**
@@ -262,7 +263,7 @@ enum {
262int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, 263int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
263 bool force); 264 bool force);
264 265
265struct irq_domain *msi_create_irq_domain(struct device_node *of_node, 266struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
266 struct msi_domain_info *info, 267 struct msi_domain_info *info,
267 struct irq_domain *parent); 268 struct irq_domain *parent);
268int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, 269int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
@@ -270,7 +271,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
270void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); 271void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
271struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); 272struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
272 273
273struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, 274struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
274 struct msi_domain_info *info, 275 struct msi_domain_info *info,
275 struct irq_domain *parent); 276 struct irq_domain *parent);
276int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, 277int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
@@ -280,19 +281,26 @@ void platform_msi_domain_free_irqs(struct device *dev);
280 281
281#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN 282#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
282void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); 283void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
283struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, 284struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
284 struct msi_domain_info *info, 285 struct msi_domain_info *info,
285 struct irq_domain *parent); 286 struct irq_domain *parent);
286int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, 287int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
287 int nvec, int type); 288 int nvec, int type);
288void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); 289void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
289struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, 290struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
290 struct msi_domain_info *info, struct irq_domain *parent); 291 struct msi_domain_info *info, struct irq_domain *parent);
291 292
292irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, 293irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
293 struct msi_desc *desc); 294 struct msi_desc *desc);
294int pci_msi_domain_check_cap(struct irq_domain *domain, 295int pci_msi_domain_check_cap(struct irq_domain *domain,
295 struct msi_domain_info *info, struct device *dev); 296 struct msi_domain_info *info, struct device *dev);
297u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
298struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
299#else
300static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
301{
302 return NULL;
303}
296#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ 304#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
297 305
298#endif /* LINUX_MSI_H */ 306#endif /* LINUX_MSI_H */
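
The msi.h hunks above switch the domain-creation helpers from struct device_node to struct fwnode_handle. A hedged adaptation sketch follows, assuming the caller still starts from an OF node and passes its embedded fwnode member; the my_* names are hypothetical and not part of this patch.

	#include <linux/errno.h>
	#include <linux/msi.h>
	#include <linux/of.h>

	static struct msi_domain_info my_msi_domain_info;	/* hypothetical */

	static int my_msi_controller_init(struct device_node *np,
					  struct irq_domain *parent)
	{
		struct irq_domain *d;

		/* struct device_node embeds a struct fwnode_handle ("fwnode"). */
		d = msi_create_irq_domain(&np->fwnode, &my_msi_domain_info, parent);

		return d ? 0 : -ENOMEM;
	}
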
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2d15e3831440..210d11a75e4f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1054,6 +1054,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1054 * This function is used to pass protocol port error state information 1054 * This function is used to pass protocol port error state information
1055 * to the switch driver. The switch driver can react to the proto_down 1055 * to the switch driver. The switch driver can react to the proto_down
1056 * by doing a phys down on the associated switch port. 1056 * by doing a phys down on the associated switch port.
1057 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1058 * This function is used to get egress tunnel information for given skb.
1059 * This is useful for retrieving outer tunnel header parameters while
 1060 * sampling a packet.
1057 * 1061 *
1058 */ 1062 */
1059struct net_device_ops { 1063struct net_device_ops {
@@ -1227,6 +1231,8 @@ struct net_device_ops {
1227 int (*ndo_get_iflink)(const struct net_device *dev); 1231 int (*ndo_get_iflink)(const struct net_device *dev);
1228 int (*ndo_change_proto_down)(struct net_device *dev, 1232 int (*ndo_change_proto_down)(struct net_device *dev,
1229 bool proto_down); 1233 bool proto_down);
1234 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1235 struct sk_buff *skb);
1230}; 1236};
1231 1237
1232/** 1238/**
@@ -2203,6 +2209,7 @@ void dev_add_offload(struct packet_offload *po);
2203void dev_remove_offload(struct packet_offload *po); 2209void dev_remove_offload(struct packet_offload *po);
2204 2210
2205int dev_get_iflink(const struct net_device *dev); 2211int dev_get_iflink(const struct net_device *dev);
2212int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2206struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 2213struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2207 unsigned short mask); 2214 unsigned short mask);
2208struct net_device *dev_get_by_name(struct net *net, const char *name); 2215struct net_device *dev_get_by_name(struct net *net, const char *name);
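
For the new ndo_fill_metadata_dst hook documented above (dispatched via dev_fill_metadata_dst()), here is a hedged sketch of how a tunnel driver might wire it up; everything prefixed mytun_ is hypothetical and the body is only a placeholder for filling in the outer tunnel header metadata.

	#include <linux/netdevice.h>

	static int mytun_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
	{
		/* Fill skb's metadata dst with the outer tunnel header parameters. */
		return 0;	/* assumed: 0 on success, negative errno on failure */
	}

	static const struct net_device_ops mytun_netdev_ops = {
		/* ...other ndo_* callbacks elided... */
		.ndo_fill_metadata_dst	= mytun_fill_metadata_dst,
	};
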
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index f3191828f037..87d6d1632dd4 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -29,6 +29,7 @@ struct device_node;
29 */ 29 */
30enum of_gpio_flags { 30enum of_gpio_flags {
31 OF_GPIO_ACTIVE_LOW = 0x1, 31 OF_GPIO_ACTIVE_LOW = 0x1,
32 OF_GPIO_SINGLE_ENDED = 0x2,
32}; 33};
33 34
34#ifdef CONFIG_OF_GPIO 35#ifdef CONFIG_OF_GPIO
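
A short consumer-side sketch of the new OF_GPIO_SINGLE_ENDED flag when a GPIO is parsed from the device tree; the "my-gpios" property name is hypothetical.

	#include <linux/of_gpio.h>

	static int my_parse_gpio(struct device_node *np, bool *single_ended)
	{
		enum of_gpio_flags flags;
		int gpio = of_get_named_gpio_flags(np, "my-gpios", 0, &flags);

		if (gpio < 0)
			return gpio;

		/* Set for open-drain/open-source lines rather than push-pull. */
		*single_ended = !!(flags & OF_GPIO_SINGLE_ENDED);

		return gpio;
	}
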
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 4bcbd586a672..65d969246a4d 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -46,6 +46,11 @@ extern int of_irq_get(struct device_node *dev, int index);
46extern int of_irq_get_byname(struct device_node *dev, const char *name); 46extern int of_irq_get_byname(struct device_node *dev, const char *name);
47extern int of_irq_to_resource_table(struct device_node *dev, 47extern int of_irq_to_resource_table(struct device_node *dev,
48 struct resource *res, int nr_irqs); 48 struct resource *res, int nr_irqs);
49extern struct irq_domain *of_msi_get_domain(struct device *dev,
50 struct device_node *np,
51 enum irq_domain_bus_token token);
52extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
53 u32 rid);
49#else 54#else
50static inline int of_irq_count(struct device_node *dev) 55static inline int of_irq_count(struct device_node *dev)
51{ 56{
@@ -64,6 +69,17 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
64{ 69{
65 return 0; 70 return 0;
66} 71}
72static inline struct irq_domain *of_msi_get_domain(struct device *dev,
73 struct device_node *np,
74 enum irq_domain_bus_token token)
75{
76 return NULL;
77}
78static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
79 u32 rid)
80{
81 return NULL;
82}
67#endif 83#endif
68 84
69#if defined(CONFIG_OF) 85#if defined(CONFIG_OF)
@@ -75,6 +91,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
75extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); 91extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
76extern struct device_node *of_irq_find_parent(struct device_node *child); 92extern struct device_node *of_irq_find_parent(struct device_node *child);
77extern void of_msi_configure(struct device *dev, struct device_node *np); 93extern void of_msi_configure(struct device *dev, struct device_node *np);
94u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
78 95
79#else /* !CONFIG_OF */ 96#else /* !CONFIG_OF */
80static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 97static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -87,6 +104,12 @@ static inline void *of_irq_find_parent(struct device_node *child)
87{ 104{
88 return NULL; 105 return NULL;
89} 106}
107
108static inline u32 of_msi_map_rid(struct device *dev,
109 struct device_node *msi_np, u32 rid_in)
110{
111 return rid_in;
112}
90#endif /* !CONFIG_OF */ 113#endif /* !CONFIG_OF */
91 114
92#endif /* __OF_IRQ_H */ 115#endif /* __OF_IRQ_H */
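
A hedged sketch of the new of_msi_map_rid() helper translating a PCI requester ID through an msi-map property; per the !CONFIG_OF stub above it simply returns the input RID when no mapping applies. The wrapper below is illustrative only.

	#include <linux/of_irq.h>
	#include <linux/pci.h>

	static u32 my_msi_rid(struct pci_dev *pdev, struct device_node *msi_np)
	{
		u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);

		/* Apply any msi-map translation described in the device tree. */
		return of_msi_map_rid(&pdev->dev, msi_np, rid);
	}
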
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index e5a70132a240..88fa8af2b937 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -17,7 +17,7 @@
17 17
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19 19
20#define INT_DMA_LCD 25 20#define INT_DMA_LCD (NR_IRQS_LEGACY + 25)
21 21
22#define OMAP1_DMA_TOUT_IRQ (1 << 0) 22#define OMAP1_DMA_TOUT_IRQ (1 << 0)
23#define OMAP_DMA_DROP_IRQ (1 << 1) 23#define OMAP_DMA_DROP_IRQ (1 << 1)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 834c4e52cb2d..c2fa3ecb0dce 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -5,11 +5,12 @@
5#include <linux/rwsem.h> 5#include <linux/rwsem.h>
6#include <linux/percpu.h> 6#include <linux/percpu.h>
7#include <linux/wait.h> 7#include <linux/wait.h>
8#include <linux/rcu_sync.h>
8#include <linux/lockdep.h> 9#include <linux/lockdep.h>
9 10
10struct percpu_rw_semaphore { 11struct percpu_rw_semaphore {
12 struct rcu_sync rss;
11 unsigned int __percpu *fast_read_ctr; 13 unsigned int __percpu *fast_read_ctr;
12 atomic_t write_ctr;
13 struct rw_semaphore rw_sem; 14 struct rw_semaphore rw_sem;
14 atomic_t slow_read_ctr; 15 atomic_t slow_read_ctr;
15 wait_queue_head_t write_waitq; 16 wait_queue_head_t write_waitq;
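
The percpu_rw_semaphore reworked above now builds its reader fast path on rcu_sync; the consumer-facing API is unchanged. A minimal usage sketch, with my_sem and the surrounding functions hypothetical:

	#include <linux/init.h>
	#include <linux/percpu-rwsem.h>

	static struct percpu_rw_semaphore my_sem;

	static int __init my_init(void)
	{
		return percpu_init_rwsem(&my_sem);
	}

	static void my_reader(void)
	{
		percpu_down_read(&my_sem);	/* fast path while no writer is active */
		/* ...read-side work... */
		percpu_up_read(&my_sem);
	}

	static void my_writer(void)
	{
		percpu_down_write(&my_sem);	/* waits out readers, forces slow path */
		/* ...write-side work... */
		percpu_up_write(&my_sem);
	}
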
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 092a0e8a479a..d841d33bcdc9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -140,33 +140,67 @@ struct hw_perf_event {
140 }; 140 };
141#endif 141#endif
142 }; 142 };
143 /*
144 * If the event is a per task event, this will point to the task in
145 * question. See the comment in perf_event_alloc().
146 */
143 struct task_struct *target; 147 struct task_struct *target;
148
149/*
150 * hw_perf_event::state flags; used to track the PERF_EF_* state.
151 */
152#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
153#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
154#define PERF_HES_ARCH 0x04
155
144 int state; 156 int state;
157
158 /*
159 * The last observed hardware counter value, updated with a
160 * local64_cmpxchg() such that pmu::read() can be called nested.
161 */
145 local64_t prev_count; 162 local64_t prev_count;
163
164 /*
165 * The period to start the next sample with.
166 */
146 u64 sample_period; 167 u64 sample_period;
168
169 /*
170 * The period we started this sample with.
171 */
147 u64 last_period; 172 u64 last_period;
173
174 /*
175 * However much is left of the current period; note that this is
176 * a full 64bit value and allows for generation of periods longer
177 * than hardware might allow.
178 */
148 local64_t period_left; 179 local64_t period_left;
180
181 /*
182 * State for throttling the event, see __perf_event_overflow() and
183 * perf_adjust_freq_unthr_context().
184 */
149 u64 interrupts_seq; 185 u64 interrupts_seq;
150 u64 interrupts; 186 u64 interrupts;
151 187
188 /*
189 * State for freq target events, see __perf_event_overflow() and
190 * perf_adjust_freq_unthr_context().
191 */
152 u64 freq_time_stamp; 192 u64 freq_time_stamp;
153 u64 freq_count_stamp; 193 u64 freq_count_stamp;
154#endif 194#endif
155}; 195};
156 196
157/*
158 * hw_perf_event::state flags
159 */
160#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
161#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
162#define PERF_HES_ARCH 0x04
163
164struct perf_event; 197struct perf_event;
165 198
166/* 199/*
167 * Common implementation detail of pmu::{start,commit,cancel}_txn 200 * Common implementation detail of pmu::{start,commit,cancel}_txn
168 */ 201 */
169#define PERF_EVENT_TXN 0x1 202#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
203#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
170 204
171/** 205/**
172 * pmu::capabilities flags 206 * pmu::capabilities flags
@@ -210,7 +244,19 @@ struct pmu {
210 244
211 /* 245 /*
212 * Try and initialize the event for this PMU. 246 * Try and initialize the event for this PMU.
213 * Should return -ENOENT when the @event doesn't match this PMU. 247 *
248 * Returns:
249 * -ENOENT -- @event is not for this PMU
250 *
251 * -ENODEV -- @event is for this PMU but PMU not present
252 * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
253 * -EINVAL -- @event is for this PMU but @event is not valid
254 * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
 255 * -EACCES -- @event is for this PMU, @event is valid, but no privileges
256 *
257 * 0 -- @event is for this PMU and valid
258 *
259 * Other error return values are allowed.
214 */ 260 */
215 int (*event_init) (struct perf_event *event); 261 int (*event_init) (struct perf_event *event);
216 262
@@ -221,27 +267,61 @@ struct pmu {
221 void (*event_mapped) (struct perf_event *event); /*optional*/ 267 void (*event_mapped) (struct perf_event *event); /*optional*/
222 void (*event_unmapped) (struct perf_event *event); /*optional*/ 268 void (*event_unmapped) (struct perf_event *event); /*optional*/
223 269
270 /*
271 * Flags for ->add()/->del()/ ->start()/->stop(). There are
272 * matching hw_perf_event::state flags.
273 */
224#define PERF_EF_START 0x01 /* start the counter when adding */ 274#define PERF_EF_START 0x01 /* start the counter when adding */
225#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ 275#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
226#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ 276#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
227 277
228 /* 278 /*
229 * Adds/Removes a counter to/from the PMU, can be done inside 279 * Adds/Removes a counter to/from the PMU, can be done inside a
230 * a transaction, see the ->*_txn() methods. 280 * transaction, see the ->*_txn() methods.
281 *
282 * The add/del callbacks will reserve all hardware resources required
283 * to service the event, this includes any counter constraint
284 * scheduling etc.
285 *
286 * Called with IRQs disabled and the PMU disabled on the CPU the event
287 * is on.
288 *
289 * ->add() called without PERF_EF_START should result in the same state
290 * as ->add() followed by ->stop().
291 *
292 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
293 * ->stop() that must deal with already being stopped without
294 * PERF_EF_UPDATE.
231 */ 295 */
232 int (*add) (struct perf_event *event, int flags); 296 int (*add) (struct perf_event *event, int flags);
233 void (*del) (struct perf_event *event, int flags); 297 void (*del) (struct perf_event *event, int flags);
234 298
235 /* 299 /*
236 * Starts/Stops a counter present on the PMU. The PMI handler 300 * Starts/Stops a counter present on the PMU.
237 * should stop the counter when perf_event_overflow() returns 301 *
238 * !0. ->start() will be used to continue. 302 * The PMI handler should stop the counter when perf_event_overflow()
303 * returns !0. ->start() will be used to continue.
304 *
305 * Also used to change the sample period.
306 *
307 * Called with IRQs disabled and the PMU disabled on the CPU the event
 308 * is on -- will be called from NMI context when the PMU generates
309 * NMIs.
310 *
311 * ->stop() with PERF_EF_UPDATE will read the counter and update
312 * period/count values like ->read() would.
313 *
 314 * ->start() with PERF_EF_RELOAD will reprogram the counter
315 * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
239 */ 316 */
240 void (*start) (struct perf_event *event, int flags); 317 void (*start) (struct perf_event *event, int flags);
241 void (*stop) (struct perf_event *event, int flags); 318 void (*stop) (struct perf_event *event, int flags);
242 319
243 /* 320 /*
244 * Updates the counter value of the event. 321 * Updates the counter value of the event.
322 *
323 * For sampling capable PMUs this will also update the software period
324 * hw_perf_event::period_left field.
245 */ 325 */
246 void (*read) (struct perf_event *event); 326 void (*read) (struct perf_event *event);
247 327
@@ -252,20 +332,26 @@ struct pmu {
252 * 332 *
253 * Start the transaction, after this ->add() doesn't need to 333 * Start the transaction, after this ->add() doesn't need to
254 * do schedulability tests. 334 * do schedulability tests.
335 *
336 * Optional.
255 */ 337 */
256 void (*start_txn) (struct pmu *pmu); /* optional */ 338 void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);
257 /* 339 /*
258 * If ->start_txn() disabled the ->add() schedulability test 340 * If ->start_txn() disabled the ->add() schedulability test
259 * then ->commit_txn() is required to perform one. On success 341 * then ->commit_txn() is required to perform one. On success
260 * the transaction is closed. On error the transaction is kept 342 * the transaction is closed. On error the transaction is kept
261 * open until ->cancel_txn() is called. 343 * open until ->cancel_txn() is called.
344 *
345 * Optional.
262 */ 346 */
263 int (*commit_txn) (struct pmu *pmu); /* optional */ 347 int (*commit_txn) (struct pmu *pmu);
264 /* 348 /*
265 * Will cancel the transaction, assumes ->del() is called 349 * Will cancel the transaction, assumes ->del() is called
266 * for each successful ->add() during the transaction. 350 * for each successful ->add() during the transaction.
351 *
352 * Optional.
267 */ 353 */
268 void (*cancel_txn) (struct pmu *pmu); /* optional */ 354 void (*cancel_txn) (struct pmu *pmu);
269 355
270 /* 356 /*
271 * Will return the value for perf_event_mmap_page::index for this event, 357 * Will return the value for perf_event_mmap_page::index for this event,
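
pmu::start_txn() now receives a txn_flags argument (PERF_PMU_TXN_ADD or PERF_PMU_TXN_READ, as defined above). A hedged sketch of how a PMU driver might honour it, only batching for ADD transactions; all my_pmu_* state is hypothetical.

	#include <linux/percpu.h>
	#include <linux/perf_event.h>

	static DEFINE_PER_CPU(unsigned int, my_txn_flags);

	static void my_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
	{
		__this_cpu_write(my_txn_flags, txn_flags);

		if (txn_flags != PERF_PMU_TXN_ADD)
			return;		/* e.g. PERF_PMU_TXN_READ: nothing to batch */

		perf_pmu_disable(pmu);	/* defer schedulability checks to commit */
	}

	static int my_pmu_commit_txn(struct pmu *pmu)
	{
		unsigned int flags = __this_cpu_read(my_txn_flags);

		__this_cpu_write(my_txn_flags, 0);

		if (flags != PERF_PMU_TXN_ADD)
			return 0;

		/* ...run the deferred schedulability test for the whole group... */
		perf_pmu_enable(pmu);
		return 0;
	}

	static void my_pmu_cancel_txn(struct pmu *pmu)
	{
		unsigned int flags = __this_cpu_read(my_txn_flags);

		__this_cpu_write(my_txn_flags, 0);

		if (flags != PERF_PMU_TXN_ADD)
			return;

		perf_pmu_enable(pmu);
	}
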
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
index 281cb91ddcf5..05082e407c4a 100644
--- a/include/linux/pinctrl/devinfo.h
+++ b/include/linux/pinctrl/devinfo.h
@@ -24,10 +24,14 @@
24 * struct dev_pin_info - pin state container for devices 24 * struct dev_pin_info - pin state container for devices
25 * @p: pinctrl handle for the containing device 25 * @p: pinctrl handle for the containing device
26 * @default_state: the default state for the handle, if found 26 * @default_state: the default state for the handle, if found
27 * @init_state: the state at probe time, if found
28 * @sleep_state: the state at suspend time, if found
29 * @idle_state: the state at idle (runtime suspend) time, if found
27 */ 30 */
28struct dev_pin_info { 31struct dev_pin_info {
29 struct pinctrl *p; 32 struct pinctrl *p;
30 struct pinctrl_state *default_state; 33 struct pinctrl_state *default_state;
34 struct pinctrl_state *init_state;
31#ifdef CONFIG_PM 35#ifdef CONFIG_PM
32 struct pinctrl_state *sleep_state; 36 struct pinctrl_state *sleep_state;
33 struct pinctrl_state *idle_state; 37 struct pinctrl_state *idle_state;
@@ -35,6 +39,7 @@ struct dev_pin_info {
35}; 39};
36 40
37extern int pinctrl_bind_pins(struct device *dev); 41extern int pinctrl_bind_pins(struct device *dev);
42extern int pinctrl_init_done(struct device *dev);
38 43
39#else 44#else
40 45
@@ -45,5 +50,10 @@ static inline int pinctrl_bind_pins(struct device *dev)
45 return 0; 50 return 0;
46} 51}
47 52
53static inline int pinctrl_init_done(struct device *dev)
54{
55 return 0;
56}
57
48#endif /* CONFIG_PINCTRL */ 58#endif /* CONFIG_PINCTRL */
49#endif /* PINCTRL_DEVINFO_H */ 59#endif /* PINCTRL_DEVINFO_H */
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index fe65962b264f..d921afd5f109 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -20,6 +20,11 @@
20 20
21/** 21/**
22 * enum pin_config_param - possible pin configuration parameters 22 * enum pin_config_param - possible pin configuration parameters
23 * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
24 * weakly drives the last value on a tristate bus, also known as a "bus
25 * holder", "bus keeper" or "repeater". This allows another device on the
26 * bus to change the value by driving the bus high or low and switching to
27 * tristate. The argument is ignored.
23 * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a 28 * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a
24 * transition from say pull-up to pull-down implies that you disable 29 * transition from say pull-up to pull-down implies that you disable
25 * pull-up in the process, this setting disables all biasing. 30 * pull-up in the process, this setting disables all biasing.
@@ -29,14 +34,6 @@
29 * if for example some other pin is going to drive the signal connected 34 * if for example some other pin is going to drive the signal connected
30 * to it for a while. Pins used for input are usually always high 35 * to it for a while. Pins used for input are usually always high
31 * impedance. 36 * impedance.
32 * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
33 * weakly drives the last value on a tristate bus, also known as a "bus
34 * holder", "bus keeper" or "repeater". This allows another device on the
35 * bus to change the value by driving the bus high or low and switching to
36 * tristate. The argument is ignored.
37 * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
38 * impedance to VDD). If the argument is != 0 pull-up is enabled,
39 * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
40 * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high 37 * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high
41 * impedance to GROUND). If the argument is != 0 pull-down is enabled, 38 * impedance to GROUND). If the argument is != 0 pull-down is enabled,
42 * if it is 0, pull-down is total, i.e. the pin is connected to GROUND. 39 * if it is 0, pull-down is total, i.e. the pin is connected to GROUND.
@@ -48,10 +45,9 @@
48 * If the argument is != 0 pull up/down is enabled, if it is 0, the 45 * If the argument is != 0 pull up/down is enabled, if it is 0, the
49 * configuration is ignored. The proper way to disable it is to use 46 * configuration is ignored. The proper way to disable it is to use
50 * @PIN_CONFIG_BIAS_DISABLE. 47 * @PIN_CONFIG_BIAS_DISABLE.
51 * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and 48 * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
52 * low, this is the most typical case and is typically achieved with two 49 * impedance to VDD). If the argument is != 0 pull-up is enabled,
53 * active transistors on the output. Setting this config will enable 50 * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
54 * push-pull mode, the argument is ignored.
55 * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open 51 * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
56 * collector) which means it is usually wired with other output ports 52 * collector) which means it is usually wired with other output ports
57 * which are then pulled up with an external resistor. Setting this 53 * which are then pulled up with an external resistor. Setting this
@@ -59,28 +55,26 @@
59 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source 55 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
60 * (open emitter). Setting this config will enable open source mode, the 56 * (open emitter). Setting this config will enable open source mode, the
61 * argument is ignored. 57 * argument is ignored.
58 * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
59 * low, this is the most typical case and is typically achieved with two
60 * active transistors on the output. Setting this config will enable
61 * push-pull mode, the argument is ignored.
62 * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current 62 * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
63 * passed as argument. The argument is in mA. 63 * passed as argument. The argument is in mA.
64 * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode,
65 * which means it will wait for signals to settle when reading inputs. The
66 * argument gives the debounce time in usecs. Setting the
67 * argument to zero turns debouncing off.
64 * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not 68 * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not
65 * affect the pin's ability to drive output. 1 enables input, 0 disables 69 * affect the pin's ability to drive output. 1 enables input, 0 disables
66 * input. 70 * input.
67 * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
68 * If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
69 * schmitt-trigger mode is disabled.
70 * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in 71 * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in
71 * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis, 72 * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis,
72 * the threshold value is given on a custom format as argument when 73 * the threshold value is given on a custom format as argument when
73 * setting pins to this mode. 74 * setting pins to this mode.
74 * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, 75 * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
75 * which means it will wait for signals to settle when reading inputs. The 76 * If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
76 * argument gives the debounce time in usecs. Setting the 77 * schmitt-trigger mode is disabled.
77 * argument to zero turns debouncing off.
78 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
79 * supplies, the argument to this parameter (on a custom format) tells
80 * the driver which alternative power source to use.
81 * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
82 * this parameter (on a custom format) tells the driver which alternative
83 * slew rate to use.
84 * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power 78 * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
85 * operation, if several modes of operation are supported these can be 79 * operation, if several modes of operation are supported these can be
86 * passed in the argument on a custom form, else just use argument 1 80 * passed in the argument on a custom form, else just use argument 1
@@ -89,29 +83,35 @@
89 * 1 to indicate high level, argument 0 to indicate low level. (Please 83 * 1 to indicate high level, argument 0 to indicate low level. (Please
90 * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a 84 * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a
91 * discussion around this parameter.) 85 * discussion around this parameter.)
86 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
87 * supplies, the argument to this parameter (on a custom format) tells
88 * the driver which alternative power source to use.
89 * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
90 * this parameter (on a custom format) tells the driver which alternative
91 * slew rate to use.
92 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if 92 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
93 * you need to pass in custom configurations to the pin controller, use 93 * you need to pass in custom configurations to the pin controller, use
94 * PIN_CONFIG_END+1 as the base offset. 94 * PIN_CONFIG_END+1 as the base offset.
95 */ 95 */
96enum pin_config_param { 96enum pin_config_param {
97 PIN_CONFIG_BIAS_BUS_HOLD,
97 PIN_CONFIG_BIAS_DISABLE, 98 PIN_CONFIG_BIAS_DISABLE,
98 PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 99 PIN_CONFIG_BIAS_HIGH_IMPEDANCE,
99 PIN_CONFIG_BIAS_BUS_HOLD,
100 PIN_CONFIG_BIAS_PULL_UP,
101 PIN_CONFIG_BIAS_PULL_DOWN, 100 PIN_CONFIG_BIAS_PULL_DOWN,
102 PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 101 PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
103 PIN_CONFIG_DRIVE_PUSH_PULL, 102 PIN_CONFIG_BIAS_PULL_UP,
104 PIN_CONFIG_DRIVE_OPEN_DRAIN, 103 PIN_CONFIG_DRIVE_OPEN_DRAIN,
105 PIN_CONFIG_DRIVE_OPEN_SOURCE, 104 PIN_CONFIG_DRIVE_OPEN_SOURCE,
105 PIN_CONFIG_DRIVE_PUSH_PULL,
106 PIN_CONFIG_DRIVE_STRENGTH, 106 PIN_CONFIG_DRIVE_STRENGTH,
107 PIN_CONFIG_INPUT_DEBOUNCE,
107 PIN_CONFIG_INPUT_ENABLE, 108 PIN_CONFIG_INPUT_ENABLE,
108 PIN_CONFIG_INPUT_SCHMITT_ENABLE,
109 PIN_CONFIG_INPUT_SCHMITT, 109 PIN_CONFIG_INPUT_SCHMITT,
110 PIN_CONFIG_INPUT_DEBOUNCE, 110 PIN_CONFIG_INPUT_SCHMITT_ENABLE,
111 PIN_CONFIG_POWER_SOURCE,
112 PIN_CONFIG_SLEW_RATE,
113 PIN_CONFIG_LOW_POWER_MODE, 111 PIN_CONFIG_LOW_POWER_MODE,
114 PIN_CONFIG_OUTPUT, 112 PIN_CONFIG_OUTPUT,
113 PIN_CONFIG_POWER_SOURCE,
114 PIN_CONFIG_SLEW_RATE,
115 PIN_CONFIG_END = 0x7FFF, 115 PIN_CONFIG_END = 0x7FFF,
116}; 116};
117 117
diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h
index b5919f8e6d1a..23073519339f 100644
--- a/include/linux/pinctrl/pinctrl-state.h
+++ b/include/linux/pinctrl/pinctrl-state.h
@@ -9,6 +9,13 @@
9 * hogs to configure muxing and pins at boot, and also as a state 9 * hogs to configure muxing and pins at boot, and also as a state
10 * to go into when returning from sleep and idle in 10 * to go into when returning from sleep and idle in
11 * .pm_runtime_resume() or ordinary .resume() for example. 11 * .pm_runtime_resume() or ordinary .resume() for example.
12 * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default"
13 * before the driver's probe() function is called. There are some
 14 * drivers where that is not appropriate because doing so would
15 * glitch the pins. In those cases you can add an "init" pinctrl
 16 * which is the state of the pins before driver probe. After probe
17 * if the pins are still in "init" state they'll be moved to
18 * "default".
12 * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into 19 * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into
13 * when the pins are idle. This is a state where the system is relaxed 20 * when the pins are idle. This is a state where the system is relaxed
14 * but not fully sleeping - some power may be on but clocks gated for 21 * but not fully sleeping - some power may be on but clocks gated for
@@ -20,5 +27,6 @@
20 * ordinary .suspend() function. 27 * ordinary .suspend() function.
21 */ 28 */
22#define PINCTRL_STATE_DEFAULT "default" 29#define PINCTRL_STATE_DEFAULT "default"
30#define PINCTRL_STATE_INIT "init"
23#define PINCTRL_STATE_IDLE "idle" 31#define PINCTRL_STATE_IDLE "idle"
24#define PINCTRL_STATE_SLEEP "sleep" 32#define PINCTRL_STATE_SLEEP "sleep"
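
A hedged consumer-side sketch of the new "init" state: a driver may leave the probe-time configuration explicitly once its glitch-sensitive window is over (otherwise the pinctrl core moves still-"init" pins to "default" after probe, as described above). Names other than the PINCTRL_STATE_* constants are hypothetical.

	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	static int my_leave_init_state(struct device *dev)
	{
		struct pinctrl *p = devm_pinctrl_get(dev);
		struct pinctrl_state *s;

		if (IS_ERR(p))
			return PTR_ERR(p);

		s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
		if (IS_ERR(s))
			return PTR_ERR(s);

		/* Hardware is set up; switch from the "init" pins to "default". */
		return pinctrl_select_state(p, s);
	}
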
diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h
index d2be19a51acd..3c85a735c380 100644
--- a/include/linux/platform_data/leds-kirkwood-netxbig.h
+++ b/include/linux/platform_data/leds-kirkwood-netxbig.h
@@ -40,6 +40,7 @@ struct netxbig_led {
40 int mode_addr; 40 int mode_addr;
41 int *mode_val; 41 int *mode_val;
42 int bright_addr; 42 int bright_addr;
43 int bright_max;
43}; 44};
44 45
45struct netxbig_led_platform_data { 46struct netxbig_led_platform_data {
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 1d2cd21242e8..54bf1484d41f 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -48,9 +48,9 @@ struct pps_source_info {
48 48
49struct pps_event_time { 49struct pps_event_time {
50#ifdef CONFIG_NTP_PPS 50#ifdef CONFIG_NTP_PPS
51 struct timespec ts_raw; 51 struct timespec64 ts_raw;
52#endif /* CONFIG_NTP_PPS */ 52#endif /* CONFIG_NTP_PPS */
53 struct timespec ts_real; 53 struct timespec64 ts_real;
54}; 54};
55 55
56/* The main struct */ 56/* The main struct */
@@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps,
105struct pps_device *pps_lookup_dev(void const *cookie); 105struct pps_device *pps_lookup_dev(void const *cookie);
106 106
107static inline void timespec_to_pps_ktime(struct pps_ktime *kt, 107static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
108 struct timespec ts) 108 struct timespec64 ts)
109{ 109{
110 kt->sec = ts.tv_sec; 110 kt->sec = ts.tv_sec;
111 kt->nsec = ts.tv_nsec; 111 kt->nsec = ts.tv_nsec;
@@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
115 115
116static inline void pps_get_ts(struct pps_event_time *ts) 116static inline void pps_get_ts(struct pps_event_time *ts)
117{ 117{
118 getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); 118 ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
119} 119}
120 120
121#else /* CONFIG_NTP_PPS */ 121#else /* CONFIG_NTP_PPS */
122 122
123static inline void pps_get_ts(struct pps_event_time *ts) 123static inline void pps_get_ts(struct pps_event_time *ts)
124{ 124{
125 getnstimeofday(&ts->ts_real); 125 ktime_get_real_ts64(&ts->ts_real);
126} 126}
127 127
128#endif /* CONFIG_NTP_PPS */ 128#endif /* CONFIG_NTP_PPS */
129 129
130/* Subtract known time delay from PPS event time(s) */ 130/* Subtract known time delay from PPS event time(s) */
131static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta) 131static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
132{ 132{
133 ts->ts_real = timespec_sub(ts->ts_real, delta); 133 ts->ts_real = timespec64_sub(ts->ts_real, delta);
134#ifdef CONFIG_NTP_PPS 134#ifdef CONFIG_NTP_PPS
135 ts->ts_raw = timespec_sub(ts->ts_raw, delta); 135 ts->ts_raw = timespec64_sub(ts->ts_raw, delta);
136#endif 136#endif
137} 137}
138 138
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bea8dd8ff5e0..75e4e30677f1 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -26,7 +26,6 @@
26 * SOFTIRQ_MASK: 0x0000ff00 26 * SOFTIRQ_MASK: 0x0000ff00
27 * HARDIRQ_MASK: 0x000f0000 27 * HARDIRQ_MASK: 0x000f0000
28 * NMI_MASK: 0x00100000 28 * NMI_MASK: 0x00100000
29 * PREEMPT_ACTIVE: 0x00200000
30 * PREEMPT_NEED_RESCHED: 0x80000000 29 * PREEMPT_NEED_RESCHED: 0x80000000
31 */ 30 */
32#define PREEMPT_BITS 8 31#define PREEMPT_BITS 8
@@ -53,10 +52,6 @@
53 52
54#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) 53#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
55 54
56#define PREEMPT_ACTIVE_BITS 1
57#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
58#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
59
60/* We use the MSB mostly because its available */ 55/* We use the MSB mostly because its available */
61#define PREEMPT_NEED_RESCHED 0x80000000 56#define PREEMPT_NEED_RESCHED 0x80000000
62 57
@@ -126,8 +121,7 @@
126 * Check whether we were atomic before we did preempt_disable(): 121 * Check whether we were atomic before we did preempt_disable():
127 * (used by the scheduler) 122 * (used by the scheduler)
128 */ 123 */
129#define in_atomic_preempt_off() \ 124#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
130 ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
131 125
132#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 126#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
133extern void preempt_count_add(int val); 127extern void preempt_count_add(int val);
@@ -146,18 +140,6 @@ extern void preempt_count_sub(int val);
146#define preempt_count_inc() preempt_count_add(1) 140#define preempt_count_inc() preempt_count_add(1)
147#define preempt_count_dec() preempt_count_sub(1) 141#define preempt_count_dec() preempt_count_sub(1)
148 142
149#define preempt_active_enter() \
150do { \
151 preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
152 barrier(); \
153} while (0)
154
155#define preempt_active_exit() \
156do { \
157 barrier(); \
158 preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
159} while (0)
160
161#ifdef CONFIG_PREEMPT_COUNT 143#ifdef CONFIG_PREEMPT_COUNT
162 144
163#define preempt_disable() \ 145#define preempt_disable() \
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
new file mode 100644
index 000000000000..a63a33e6196e
--- /dev/null
+++ b/include/linux/rcu_sync.h
@@ -0,0 +1,86 @@
1/*
2 * RCU-based infrastructure for lightweight reader-writer locking
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (c) 2015, Red Hat, Inc.
19 *
20 * Author: Oleg Nesterov <oleg@redhat.com>
21 */
22
23#ifndef _LINUX_RCU_SYNC_H_
24#define _LINUX_RCU_SYNC_H_
25
26#include <linux/wait.h>
27#include <linux/rcupdate.h>
28
29enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
30
31/* Structure to mediate between updaters and fastpath-using readers. */
32struct rcu_sync {
33 int gp_state;
34 int gp_count;
35 wait_queue_head_t gp_wait;
36
37 int cb_state;
38 struct rcu_head cb_head;
39
40 enum rcu_sync_type gp_type;
41};
42
43extern void rcu_sync_lockdep_assert(struct rcu_sync *);
44
45/**
46 * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
47 * @rsp: Pointer to rcu_sync structure to use for synchronization
48 *
49 * Returns true if readers are permitted to use their fastpaths.
50 * Must be invoked within an RCU read-side critical section whose
 51 * flavor matches that of the rcu_sync structure.
52 */
53static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
54{
55#ifdef CONFIG_PROVE_RCU
56 rcu_sync_lockdep_assert(rsp);
57#endif
58 return !rsp->gp_state; /* GP_IDLE */
59}
60
61extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
62extern void rcu_sync_enter(struct rcu_sync *);
63extern void rcu_sync_exit(struct rcu_sync *);
64extern void rcu_sync_dtor(struct rcu_sync *);
65
66#define __RCU_SYNC_INITIALIZER(name, type) { \
67 .gp_state = 0, \
68 .gp_count = 0, \
69 .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
70 .cb_state = 0, \
71 .gp_type = type, \
72 }
73
74#define __DEFINE_RCU_SYNC(name, type) \
 75	struct rcu_sync name = __RCU_SYNC_INITIALIZER(name, type)
76
77#define DEFINE_RCU_SYNC(name) \
78 __DEFINE_RCU_SYNC(name, RCU_SYNC)
79
80#define DEFINE_RCU_SCHED_SYNC(name) \
81 __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
82
83#define DEFINE_RCU_BH_SYNC(name) \
84 __DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
85
86#endif /* _LINUX_RCU_SYNC_H_ */
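
A minimal usage sketch for the rcu_sync primitive introduced above, assuming the sched flavor: readers consult rcu_sync_is_idle() inside a matching read-side section to pick a fast path, while writers bracket updates with rcu_sync_enter()/rcu_sync_exit(). The my_* names are hypothetical.

	#include <linux/rcu_sync.h>
	#include <linux/rcupdate.h>

	static struct rcu_sync my_rss;

	static void my_setup(void)
	{
		rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
	}

	static void my_reader(void)
	{
		rcu_read_lock_sched();		/* flavor matches RCU_SCHED_SYNC */
		if (rcu_sync_is_idle(&my_rss)) {
			/* fast path: no writer is active */
		} else {
			/* slow path: fall back to the heavyweight mechanism */
		}
		rcu_read_unlock_sched();
	}

	static void my_writer(void)
	{
		rcu_sync_enter(&my_rss);	/* push readers onto the slow path */
		/* ...perform the update... */
		rcu_sync_exit(&my_rss);
	}
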
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 17c6b1f84a77..5ed540986019 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -247,10 +247,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
247 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 247 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
248 */ 248 */
249#define list_entry_rcu(ptr, type, member) \ 249#define list_entry_rcu(ptr, type, member) \
250({ \ 250 container_of(lockless_dereference(ptr), type, member)
251 typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
252 container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
253})
254 251
255/** 252/**
256 * Where are list_empty_rcu() and list_first_entry_rcu()? 253 * Where are list_empty_rcu() and list_first_entry_rcu()?
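
list_entry_rcu() above is now a plain container_of() over lockless_dereference(); its usage is unchanged and must still sit under rcu_read_lock(). A hedged sketch with a hypothetical structure:

	#include <linux/rculist.h>

	struct my_node {
		int val;
		struct list_head link;
	};

	static int my_first_val(struct list_head *head)
	{
		struct my_node *n;
		int val = -1;

		rcu_read_lock();
		n = list_entry_rcu(head->next, struct my_node, link);
		if (&n->link != head)		/* list not empty */
			val = n->val;
		rcu_read_unlock();

		return val;
	}
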
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 581abf848566..a0189ba67fde 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -160,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
160 * more than one CPU). 160 * more than one CPU).
161 */ 161 */
162void call_rcu(struct rcu_head *head, 162void call_rcu(struct rcu_head *head,
163 void (*func)(struct rcu_head *head)); 163 rcu_callback_t func);
164 164
165#else /* #ifdef CONFIG_PREEMPT_RCU */ 165#else /* #ifdef CONFIG_PREEMPT_RCU */
166 166
@@ -191,7 +191,7 @@ void call_rcu(struct rcu_head *head,
191 * memory ordering guarantees. 191 * memory ordering guarantees.
192 */ 192 */
193void call_rcu_bh(struct rcu_head *head, 193void call_rcu_bh(struct rcu_head *head,
194 void (*func)(struct rcu_head *head)); 194 rcu_callback_t func);
195 195
196/** 196/**
197 * call_rcu_sched() - Queue an RCU for invocation after sched grace period. 197 * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -213,7 +213,7 @@ void call_rcu_bh(struct rcu_head *head,
213 * memory ordering guarantees. 213 * memory ordering guarantees.
214 */ 214 */
215void call_rcu_sched(struct rcu_head *head, 215void call_rcu_sched(struct rcu_head *head,
216 void (*func)(struct rcu_head *rcu)); 216 rcu_callback_t func);
217 217
218void synchronize_sched(void); 218void synchronize_sched(void);
219 219
@@ -274,7 +274,7 @@ do { \
274 * See the description of call_rcu() for more detailed information on 274 * See the description of call_rcu() for more detailed information on
275 * memory ordering guarantees. 275 * memory ordering guarantees.
276 */ 276 */
277void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); 277void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
278void synchronize_rcu_tasks(void); 278void synchronize_rcu_tasks(void);
279void rcu_barrier_tasks(void); 279void rcu_barrier_tasks(void);
280 280
@@ -297,12 +297,14 @@ void synchronize_rcu(void);
297 297
298static inline void __rcu_read_lock(void) 298static inline void __rcu_read_lock(void)
299{ 299{
300 preempt_disable(); 300 if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
301 preempt_disable();
301} 302}
302 303
303static inline void __rcu_read_unlock(void) 304static inline void __rcu_read_unlock(void)
304{ 305{
305 preempt_enable(); 306 if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
307 preempt_enable();
306} 308}
307 309
308static inline void synchronize_rcu(void) 310static inline void synchronize_rcu(void)
@@ -535,29 +537,9 @@ static inline int rcu_read_lock_sched_held(void)
535 537
536#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 538#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
537 539
538/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */
539static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
540{
541}
542
543#ifdef CONFIG_PROVE_RCU 540#ifdef CONFIG_PROVE_RCU
544 541
545/** 542/**
546 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
547 * @c: condition to check
548 * @s: informative message
549 */
550#define rcu_lockdep_assert(c, s) \
551 do { \
552 static bool __section(.data.unlikely) __warned; \
553 deprecate_rcu_lockdep_assert(); \
554 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
555 __warned = true; \
556 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
557 } \
558 } while (0)
559
560/**
561 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met 543 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
562 * @c: condition to check 544 * @c: condition to check
563 * @s: informative message 545 * @s: informative message
@@ -594,7 +576,6 @@ static inline void rcu_preempt_sleep_check(void)
594 576
595#else /* #ifdef CONFIG_PROVE_RCU */ 577#else /* #ifdef CONFIG_PROVE_RCU */
596 578
597#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
598#define RCU_LOCKDEP_WARN(c, s) do { } while (0) 579#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
599#define rcu_sleep_check() do { } while (0) 580#define rcu_sleep_check() do { } while (0)
600 581
@@ -811,6 +792,28 @@ static inline void rcu_preempt_sleep_check(void)
811#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) 792#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
812 793
813/** 794/**
795 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
796 * @p: The pointer to hand off
797 *
798 * This is simply an identity function, but it documents where a pointer
799 * is handed off from RCU to some other synchronization mechanism, for
800 * example, reference counting or locking. In C11, it would map to
801 * kill_dependency(). It could be used as follows:
802 *
803 * rcu_read_lock();
804 * p = rcu_dereference(gp);
805 * long_lived = is_long_lived(p);
806 * if (long_lived) {
807 * if (!atomic_inc_not_zero(p->refcnt))
808 * long_lived = false;
809 * else
810 * p = rcu_pointer_handoff(p);
811 * }
812 * rcu_read_unlock();
813 */
814#define rcu_pointer_handoff(p) (p)
815
816/**
814 * rcu_read_lock() - mark the beginning of an RCU read-side critical section 817 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
815 * 818 *
816 * When synchronize_rcu() is invoked on one CPU while other CPUs 819 * When synchronize_rcu() is invoked on one CPU while other CPUs
@@ -1065,7 +1068,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1065#define __kfree_rcu(head, offset) \ 1068#define __kfree_rcu(head, offset) \
1066 do { \ 1069 do { \
1067 BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ 1070 BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
1068 kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ 1071 kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
1069 } while (0) 1072 } while (0)
1070 1073
1071/** 1074/**
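
The call_rcu() family above now takes the rcu_callback_t typedef (void (*)(struct rcu_head *)) rather than a spelled-out function-pointer type; existing callers are unaffected. A hedged sketch, with struct my_obj hypothetical:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_obj {
		int data;
		struct rcu_head rcu;
	};

	static void my_obj_free_rcu(struct rcu_head *head)	/* an rcu_callback_t */
	{
		kfree(container_of(head, struct my_obj, rcu));
	}

	static void my_obj_release(struct my_obj *obj)
	{
		call_rcu(&obj->rcu, my_obj_free_rcu);
	}
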
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ff968b7af3a4..4c1aaf9cce7b 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -83,7 +83,7 @@ static inline void synchronize_sched_expedited(void)
83} 83}
84 84
85static inline void kfree_call_rcu(struct rcu_head *head, 85static inline void kfree_call_rcu(struct rcu_head *head,
86 void (*func)(struct rcu_head *rcu)) 86 rcu_callback_t func)
87{ 87{
88 call_rcu(head, func); 88 call_rcu(head, func);
89} 89}
@@ -216,6 +216,7 @@ static inline bool rcu_is_watching(void)
216 216
217static inline void rcu_all_qs(void) 217static inline void rcu_all_qs(void)
218{ 218{
219 barrier(); /* Avoid RCU read-side critical sections leaking across. */
219} 220}
220 221
221#endif /* __LINUX_RCUTINY_H */ 222#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5abec82f325e..60d15a080d7c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -48,7 +48,7 @@ void synchronize_rcu_bh(void);
48void synchronize_sched_expedited(void); 48void synchronize_sched_expedited(void);
49void synchronize_rcu_expedited(void); 49void synchronize_rcu_expedited(void);
50 50
51void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 51void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
52 52
53/** 53/**
54 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period 54 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 8fc0bfd8edc4..d68bb402120e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -296,6 +296,8 @@ typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
296 unsigned int *val); 296 unsigned int *val);
297typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg, 297typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
298 unsigned int val); 298 unsigned int val);
299typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
300 unsigned int mask, unsigned int val);
299typedef struct regmap_async *(*regmap_hw_async_alloc)(void); 301typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
300typedef void (*regmap_hw_free_context)(void *context); 302typedef void (*regmap_hw_free_context)(void *context);
301 303
@@ -335,6 +337,7 @@ struct regmap_bus {
335 regmap_hw_gather_write gather_write; 337 regmap_hw_gather_write gather_write;
336 regmap_hw_async_write async_write; 338 regmap_hw_async_write async_write;
337 regmap_hw_reg_write reg_write; 339 regmap_hw_reg_write reg_write;
340 regmap_hw_reg_update_bits reg_update_bits;
338 regmap_hw_read read; 341 regmap_hw_read read;
339 regmap_hw_reg_read reg_read; 342 regmap_hw_reg_read reg_read;
340 regmap_hw_free_context free_context; 343 regmap_hw_free_context free_context;
@@ -791,6 +794,9 @@ struct regmap_irq {
791 unsigned int mask; 794 unsigned int mask;
792}; 795};
793 796
797#define REGMAP_IRQ_REG(_irq, _off, _mask) \
798 [_irq] = { .reg_offset = (_off), .mask = (_mask) }
799
794/** 800/**
795 * Description of a generic regmap irq_chip. This is not intended to 801 * Description of a generic regmap irq_chip. This is not intended to
796 * handle every possible interrupt controller, but it should handle a 802 * handle every possible interrupt controller, but it should handle a
@@ -800,6 +806,8 @@ struct regmap_irq {
800 * 806 *
801 * @status_base: Base status register address. 807 * @status_base: Base status register address.
802 * @mask_base: Base mask register address. 808 * @mask_base: Base mask register address.
 809 * @unmask_base: Base unmask register address, for chips that have
 810 * separate mask and unmask registers.
803 * @ack_base: Base ack address. If zero then the chip is clear on read. 811 * @ack_base: Base ack address. If zero then the chip is clear on read.
804 * Using zero value is possible with @use_ack bit. 812 * Using zero value is possible with @use_ack bit.
805 * @wake_base: Base address for wake enables. If zero unsupported. 813 * @wake_base: Base address for wake enables. If zero unsupported.
@@ -807,6 +815,7 @@ struct regmap_irq {
807 * @init_ack_masked: Ack all masked interrupts once during initalization. 815 * @init_ack_masked: Ack all masked interrupts once during initalization.
808 * @mask_invert: Inverted mask register: cleared bits are masked out. 816 * @mask_invert: Inverted mask register: cleared bits are masked out.
809 * @use_ack: Use @ack register even if it is zero. 817 * @use_ack: Use @ack register even if it is zero.
818 * @ack_invert: Inverted ack register: cleared bits for ack.
810 * @wake_invert: Inverted wake register: cleared bits are wake enabled. 819 * @wake_invert: Inverted wake register: cleared bits are wake enabled.
811 * @runtime_pm: Hold a runtime PM lock on the device when accessing it. 820 * @runtime_pm: Hold a runtime PM lock on the device when accessing it.
812 * 821 *
@@ -820,12 +829,14 @@ struct regmap_irq_chip {
820 829
821 unsigned int status_base; 830 unsigned int status_base;
822 unsigned int mask_base; 831 unsigned int mask_base;
832 unsigned int unmask_base;
823 unsigned int ack_base; 833 unsigned int ack_base;
824 unsigned int wake_base; 834 unsigned int wake_base;
825 unsigned int irq_reg_stride; 835 unsigned int irq_reg_stride;
826 bool init_ack_masked:1; 836 bool init_ack_masked:1;
827 bool mask_invert:1; 837 bool mask_invert:1;
828 bool use_ack:1; 838 bool use_ack:1;
839 bool ack_invert:1;
829 bool wake_invert:1; 840 bool wake_invert:1;
830 bool runtime_pm:1; 841 bool runtime_pm:1;
831 842
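
A hedged sketch of the new REGMAP_IRQ_REG() helper added above, used to populate a regmap_irq table instead of open-coding .reg_offset/.mask; all MYCHIP_* values and register addresses are made up for illustration.

	#include <linux/bitops.h>
	#include <linux/kernel.h>
	#include <linux/regmap.h>

	enum { MYCHIP_IRQ_ALARM, MYCHIP_IRQ_GPIO };

	static const struct regmap_irq mychip_irqs[] = {
		REGMAP_IRQ_REG(MYCHIP_IRQ_ALARM, 0, BIT(0)),	/* status reg 0, bit 0 */
		REGMAP_IRQ_REG(MYCHIP_IRQ_GPIO,  1, BIT(3)),	/* status reg 1, bit 3 */
	};

	static const struct regmap_irq_chip mychip_irq_chip = {
		.name		= "mychip",
		.irqs		= mychip_irqs,
		.num_irqs	= ARRAY_SIZE(mychip_irqs),
		.num_regs	= 2,
		.status_base	= 0x10,
		.mask_base	= 0x20,
	};
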
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b7b9501b41af..9e1e06c3ce05 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -599,33 +599,42 @@ struct task_cputime_atomic {
599 .sum_exec_runtime = ATOMIC64_INIT(0), \ 599 .sum_exec_runtime = ATOMIC64_INIT(0), \
600 } 600 }
601 601
602#ifdef CONFIG_PREEMPT_COUNT 602#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
603#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) 603
604#else 604/*
605#define PREEMPT_DISABLED PREEMPT_ENABLED 605 * Disable preemption until the scheduler is running -- use an unconditional
606#endif 606 * value so that it also works on !PREEMPT_COUNT kernels.
607 *
608 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
609 */
610#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
607 611
608/* 612/*
609 * Disable preemption until the scheduler is running. 613 * Initial preempt_count value; reflects the preempt_count schedule invariant
610 * Reset by start_kernel()->sched_init()->init_idle(). 614 * which states that during context switches:
611 * 615 *
612 * We include PREEMPT_ACTIVE to avoid cond_resched() from working 616 * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
613 * before the scheduler is active -- see should_resched(). 617 *
618 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
619 * Note: See finish_task_switch().
614 */ 620 */
615#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE) 621#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
616 622
617/** 623/**
618 * struct thread_group_cputimer - thread group interval timer counts 624 * struct thread_group_cputimer - thread group interval timer counts
619 * @cputime_atomic: atomic thread group interval timers. 625 * @cputime_atomic: atomic thread group interval timers.
620 * @running: non-zero when there are timers running and 626 * @running: true when there are timers running and
621 * @cputime receives updates. 627 * @cputime_atomic receives updates.
628 * @checking_timer: true when a thread in the group is in the
629 * process of checking for thread group timers.
622 * 630 *
623 * This structure contains the version of task_cputime, above, that is 631 * This structure contains the version of task_cputime, above, that is
624 * used for thread group CPU timer calculations. 632 * used for thread group CPU timer calculations.
625 */ 633 */
626struct thread_group_cputimer { 634struct thread_group_cputimer {
627 struct task_cputime_atomic cputime_atomic; 635 struct task_cputime_atomic cputime_atomic;
628 int running; 636 bool running;
637 bool checking_timer;
629}; 638};
630 639
631#include <linux/rwsem.h> 640#include <linux/rwsem.h>
@@ -1139,8 +1148,6 @@ struct sched_domain_topology_level {
1139#endif 1148#endif
1140}; 1149};
1141 1150
1142extern struct sched_domain_topology_level *sched_domain_topology;
1143
1144extern void set_sched_topology(struct sched_domain_topology_level *tl); 1151extern void set_sched_topology(struct sched_domain_topology_level *tl);
1145extern void wake_up_if_idle(int cpu); 1152extern void wake_up_if_idle(int cpu);
1146 1153
@@ -1189,10 +1196,10 @@ struct load_weight {
1189 1196
1190/* 1197/*
1191 * The load_avg/util_avg accumulates an infinite geometric series. 1198 * The load_avg/util_avg accumulates an infinite geometric series.
1192 * 1) load_avg factors the amount of time that a sched_entity is 1199 * 1) load_avg factors frequency scaling into the amount of time that a
1193 * runnable on a rq into its weight. For cfs_rq, it is the aggregated 1200 * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
1194 * such weights of all runnable and blocked sched_entities. 1201 * aggregated such weights of all runnable and blocked sched_entities.
1195 * 2) util_avg factors frequency scaling into the amount of time 1202 * 2) util_avg factors frequency and cpu scaling into the amount of time
1196 * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE]. 1203 * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
1197 * For cfs_rq, it is the aggregated such times of all runnable and 1204 * For cfs_rq, it is the aggregated such times of all runnable and
1198 * blocked sched_entities. 1205 * blocked sched_entities.
@@ -1342,10 +1349,12 @@ struct sched_dl_entity {
1342 1349
1343union rcu_special { 1350union rcu_special {
1344 struct { 1351 struct {
1345 bool blocked; 1352 u8 blocked;
1346 bool need_qs; 1353 u8 need_qs;
1347 } b; 1354 u8 exp_need_qs;
1348 short s; 1355 u8 pad; /* Otherwise the compiler can store garbage here. */
1356 } b; /* Bits. */
1357 u32 s; /* Set of bits. */
1349}; 1358};
1350struct rcu_node; 1359struct rcu_node;
1351 1360
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 9d303b8847df..9089a2ae913d 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -21,4 +21,9 @@ static inline int dl_task(struct task_struct *p)
21 return dl_prio(p->prio); 21 return dl_prio(p->prio);
22} 22}
23 23
24static inline bool dl_time_before(u64 a, u64 b)
25{
26 return (s64)(a - b) < 0;
27}
28
24#endif /* _SCHED_DEADLINE_H */ 29#endif /* _SCHED_DEADLINE_H */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index e6109a6cd8f6..12910cf19869 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -24,9 +24,6 @@ struct smpboot_thread_data;
24 * parked (cpu offline) 24 * parked (cpu offline)
25 * @unpark: Optional unpark function, called when the thread is 25 * @unpark: Optional unpark function, called when the thread is
26 * unparked (cpu online) 26 * unparked (cpu online)
27 * @pre_unpark: Optional unpark function, called before the thread is
28 * unparked (cpu online). This is not guaranteed to be
29 * called on the target cpu of the thread. Careful!
30 * @cpumask: Internal state. To update which threads are unparked, 27 * @cpumask: Internal state. To update which threads are unparked,
31 * call smpboot_update_cpumask_percpu_thread(). 28 * call smpboot_update_cpumask_percpu_thread().
32 * @selfparking: Thread is not parked by the park function. 29 * @selfparking: Thread is not parked by the park function.
@@ -42,7 +39,6 @@ struct smp_hotplug_thread {
42 void (*cleanup)(unsigned int cpu, bool online); 39 void (*cleanup)(unsigned int cpu, bool online);
43 void (*park)(unsigned int cpu); 40 void (*park)(unsigned int cpu);
44 void (*unpark)(unsigned int cpu); 41 void (*unpark)(unsigned int cpu);
45 void (*pre_unpark)(unsigned int cpu);
46 cpumask_var_t cpumask; 42 cpumask_var_t cpumask;
47 bool selfparking; 43 bool selfparking;
48 const char *thread_comm; 44 const char *thread_comm;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index bdeb4567b71e..f5f80c5643ac 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -215,8 +215,11 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
215 */ 215 */
216static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) 216static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
217{ 217{
218 int retval = __srcu_read_lock(sp); 218 int retval;
219 219
220 preempt_disable();
221 retval = __srcu_read_lock(sp);
222 preempt_enable();
220 rcu_lock_acquire(&(sp)->dep_map); 223 rcu_lock_acquire(&(sp)->dep_map);
221 return retval; 224 return retval;
222} 225}
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 414d924318ce..0adedca24c5b 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -33,6 +33,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
33 struct cpu_stop_work *work_buf); 33 struct cpu_stop_work *work_buf);
34int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); 34int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
35int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); 35int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
36void stop_machine_park(int cpu);
37void stop_machine_unpark(int cpu);
36 38
37#else /* CONFIG_SMP */ 39#else /* CONFIG_SMP */
38 40
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ba0ae09cbb21..ec89d846324c 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -263,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
263/* 263/*
264 * PPS accessor 264 * PPS accessor
265 */ 265 */
266extern void getnstime_raw_and_real(struct timespec *ts_raw, 266extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
267 struct timespec *ts_real); 267 struct timespec64 *ts_real);
268 268
269/* 269/*
270 * Persistent clock related interfaces 270 * Persistent clock related interfaces
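
Callers of the old getnstime_raw_and_real() have to change both the function name and the argument type to timespec64. A hedged caller sketch with illustrative variable names:

    #include <linux/timekeeping.h>
    #include <linux/time64.h>
    #include <linux/printk.h>

    static void example_pps_snapshot(void)
    {
            struct timespec64 ts_raw, ts_real;

            /* Was: getnstime_raw_and_real(&raw, &real) with struct timespec. */
            ktime_get_raw_and_real_ts64(&ts_raw, &ts_real);

            pr_debug("raw %lld.%09ld real %lld.%09ld\n",
                     (long long)ts_raw.tv_sec, ts_raw.tv_nsec,
                     (long long)ts_real.tv_sec, ts_real.tv_nsec);
    }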
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 9d3f1a5b6178..39c25dbebfe8 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
152#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) 152#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
153 153
154extern int do_adjtimex(struct timex *); 154extern int do_adjtimex(struct timex *);
155extern void hardpps(const struct timespec *, const struct timespec *); 155extern void hardpps(const struct timespec64 *, const struct timespec64 *);
156 156
157int read_current_timer(unsigned long *timer_val); 157int read_current_timer(unsigned long *timer_val);
158void ntp_notify_cmos_timer(void); 158void ntp_notify_cmos_timer(void);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 0ec598381f97..3bff87a25a42 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -182,22 +182,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
182# endif 182# endif
183#endif 183#endif
184 184
185struct vmalloc_info {
186 unsigned long used;
187 unsigned long largest_chunk;
188};
189
190#ifdef CONFIG_MMU 185#ifdef CONFIG_MMU
191#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) 186#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
192extern void get_vmalloc_info(struct vmalloc_info *vmi);
193#else 187#else
194
195#define VMALLOC_TOTAL 0UL 188#define VMALLOC_TOTAL 0UL
196#define get_vmalloc_info(vmi) \
197do { \
198 (vmi)->used = 0; \
199 (vmi)->largest_chunk = 0; \
200} while (0)
201#endif 189#endif
202 190
203#endif /* _LINUX_VMALLOC_H */ 191#endif /* _LINUX_VMALLOC_H */
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index af9d5382f6cb..ce009710120c 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -60,6 +60,38 @@ static inline struct metadata_dst *tun_rx_dst(int md_size)
60 return tun_dst; 60 return tun_dst;
61} 61}
62 62
63static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
64{
65 struct metadata_dst *md_dst = skb_metadata_dst(skb);
 66 int md_size;
 67 struct metadata_dst *new_md;
 68
 69 if (!md_dst)
 70 return ERR_PTR(-EINVAL);
 71 md_size = md_dst->u.tun_info.options_len;
72 new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
73 if (!new_md)
74 return ERR_PTR(-ENOMEM);
75
76 memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
77 sizeof(struct ip_tunnel_info) + md_size);
78 skb_dst_drop(skb);
79 dst_hold(&new_md->dst);
80 skb_dst_set(skb, &new_md->dst);
81 return new_md;
82}
83
84static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
85{
86 struct metadata_dst *dst;
87
88 dst = tun_dst_unclone(skb);
89 if (IS_ERR(dst))
90 return NULL;
91
92 return &dst->u.tun_info;
93}
94
63static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, 95static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
64 __be16 flags, 96 __be16 flags,
65 __be64 tunnel_id, 97 __be64 tunnel_id,
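
A hypothetical transmit-path user of the new helper: it swaps the shared metadata dst for a private copy before editing the per-packet tunnel key. The TUNNEL_CSUM tweak is only an example mutation, not taken from this patch.

    #include <linux/skbuff.h>
    #include <net/dst_metadata.h>
    #include <net/ip_tunnels.h>

    static int example_mark_csum(struct sk_buff *skb)
    {
            struct ip_tunnel_info *info;

            /* Get a writable, un-shared copy of the tunnel metadata. */
            info = skb_tunnel_info_unclone(skb);
            if (!info)
                    return -ENOMEM;

            info->key.tun_flags |= TUNNEL_CSUM;     /* per-packet change */
            return 0;
    }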
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 884e728b09d9..26ede14597da 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -86,7 +86,7 @@
86 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 86 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
87 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 87 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
88 .tlv.p = (tlv_array),\ 88 .tlv.p = (tlv_array),\
89 .info = snd_soc_info_volsw, \ 89 .info = snd_soc_info_volsw_sx, \
90 .get = snd_soc_get_volsw_sx,\ 90 .get = snd_soc_get_volsw_sx,\
91 .put = snd_soc_put_volsw_sx, \ 91 .put = snd_soc_put_volsw_sx, \
92 .private_value = (unsigned long)&(struct soc_mixer_control) \ 92 .private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -156,7 +156,7 @@
156 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 156 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
157 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 157 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
158 .tlv.p = (tlv_array), \ 158 .tlv.p = (tlv_array), \
159 .info = snd_soc_info_volsw, \ 159 .info = snd_soc_info_volsw_sx, \
160 .get = snd_soc_get_volsw_sx, \ 160 .get = snd_soc_get_volsw_sx, \
161 .put = snd_soc_put_volsw_sx, \ 161 .put = snd_soc_put_volsw_sx, \
162 .private_value = (unsigned long)&(struct soc_mixer_control) \ 162 .private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -574,6 +574,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
574 struct snd_ctl_elem_value *ucontrol); 574 struct snd_ctl_elem_value *ucontrol);
575int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, 575int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
576 struct snd_ctl_elem_info *uinfo); 576 struct snd_ctl_elem_info *uinfo);
577int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
578 struct snd_ctl_elem_info *uinfo);
577#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info 579#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info
578int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, 580int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
579 struct snd_ctl_elem_value *ucontrol); 581 struct snd_ctl_elem_value *ucontrol);
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 898be3a8db9a..6d8f8fba3341 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -119,7 +119,7 @@
119#define WM8904_MIC_REGS 2 119#define WM8904_MIC_REGS 2
120#define WM8904_GPIO_REGS 4 120#define WM8904_GPIO_REGS 4
121#define WM8904_DRC_REGS 4 121#define WM8904_DRC_REGS 4
122#define WM8904_EQ_REGS 25 122#define WM8904_EQ_REGS 24
123 123
124/** 124/**
125 * DRC configurations are specified with a label and a set of register 125 * DRC configurations are specified with a label and a set of register
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 539d6bc3216a..9b90c57517a9 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -104,22 +104,17 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
104 TP_ARGS(p)); 104 TP_ARGS(p));
105 105
106#ifdef CREATE_TRACE_POINTS 106#ifdef CREATE_TRACE_POINTS
107static inline long __trace_sched_switch_state(struct task_struct *p) 107static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
108{ 108{
109 long state = p->state;
110
111#ifdef CONFIG_PREEMPT
112#ifdef CONFIG_SCHED_DEBUG 109#ifdef CONFIG_SCHED_DEBUG
113 BUG_ON(p != current); 110 BUG_ON(p != current);
114#endif /* CONFIG_SCHED_DEBUG */ 111#endif /* CONFIG_SCHED_DEBUG */
112
115 /* 113 /*
116 * For all intents and purposes a preempted task is a running task. 114 * Preemption ignores task state, therefore preempted tasks are always
115 * RUNNING (we will not have dequeued if state != RUNNING).
117 */ 116 */
118 if (preempt_count() & PREEMPT_ACTIVE) 117 return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
119 state = TASK_RUNNING | TASK_STATE_MAX;
120#endif /* CONFIG_PREEMPT */
121
122 return state;
123} 118}
124#endif /* CREATE_TRACE_POINTS */ 119#endif /* CREATE_TRACE_POINTS */
125 120
@@ -128,10 +123,11 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
128 */ 123 */
129TRACE_EVENT(sched_switch, 124TRACE_EVENT(sched_switch,
130 125
131 TP_PROTO(struct task_struct *prev, 126 TP_PROTO(bool preempt,
127 struct task_struct *prev,
132 struct task_struct *next), 128 struct task_struct *next),
133 129
134 TP_ARGS(prev, next), 130 TP_ARGS(preempt, prev, next),
135 131
136 TP_STRUCT__entry( 132 TP_STRUCT__entry(
137 __array( char, prev_comm, TASK_COMM_LEN ) 133 __array( char, prev_comm, TASK_COMM_LEN )
@@ -147,7 +143,7 @@ TRACE_EVENT(sched_switch,
147 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); 143 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
148 __entry->prev_pid = prev->pid; 144 __entry->prev_pid = prev->pid;
149 __entry->prev_prio = prev->prio; 145 __entry->prev_prio = prev->prio;
150 __entry->prev_state = __trace_sched_switch_state(prev); 146 __entry->prev_state = __trace_sched_switch_state(preempt, prev);
151 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); 147 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
152 __entry->next_pid = next->pid; 148 __entry->next_pid = next->pid;
153 __entry->next_prio = next->prio; 149 __entry->next_prio = next->prio;
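
Out-of-tree or module probes attached to sched_switch must grow the new 'preempt' argument as well; a hedged fragment of such a probe (module boilerplate omitted, names hypothetical):

    #include <linux/sched.h>
    #include <linux/printk.h>
    #include <trace/events/sched.h>

    static void example_switch_probe(void *data, bool preempt,
                                     struct task_struct *prev,
                                     struct task_struct *next)
    {
            if (preempt)
                    pr_debug("%s preempted by %s\n", prev->comm, next->comm);
    }

    static int example_attach(void)
    {
            return register_trace_sched_switch(example_switch_probe, NULL);
    }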
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 1f5e68923929..7e385b83b9d8 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -45,8 +45,24 @@ struct mmc_ioc_cmd {
45}; 45};
46#define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr 46#define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr
47 47
48#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd) 48/**
49 * struct mmc_ioc_multi_cmd - multi command information
50 * @num_of_cmds: Number of commands to send. Must be equal to or less than
51 * MMC_IOC_MAX_CMDS.
52 * @cmds: Array of commands with length equal to 'num_of_cmds'
53 */
54struct mmc_ioc_multi_cmd {
55 __u64 num_of_cmds;
56 struct mmc_ioc_cmd cmds[0];
57};
49 58
59#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
60/*
61 * MMC_IOC_MULTI_CMD: Used to send an array of MMC commands described by
62 * the structure mmc_ioc_multi_cmd. The MMC driver will issue all
63 * commands in array in sequence to card.
64 */
65#define MMC_IOC_MULTI_CMD _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_multi_cmd)
50/* 66/*
51 * Since this ioctl is only meant to enhance (and not replace) normal access 67 * Since this ioctl is only meant to enhance (and not replace) normal access
52 * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES 68 * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
@@ -54,4 +70,5 @@ struct mmc_ioc_cmd {
54 * block device operations. 70 * block device operations.
55 */ 71 */
56#define MMC_IOC_MAX_BYTES (512L * 256) 72#define MMC_IOC_MAX_BYTES (512L * 256)
73#define MMC_IOC_MAX_CMDS 255
57#endif /* LINUX_MMC_IOCTL_H */ 74#endif /* LINUX_MMC_IOCTL_H */
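
A user-space sketch of driving the new multi-command ioctl: allocate the fixed header plus a flexible array of mmc_ioc_cmd entries, set num_of_cmds, and pass the whole buffer to MMC_IOC_MULTI_CMD. The device path and the CMD13 opcodes are placeholders, not a known-good sequence.

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mmc/ioctl.h>

    int main(void)
    {
            const unsigned int ncmds = 2;   /* must not exceed MMC_IOC_MAX_CMDS */
            struct mmc_ioc_multi_cmd *mc;
            int fd, ret = 1;

            mc = calloc(1, sizeof(*mc) + ncmds * sizeof(struct mmc_ioc_cmd));
            if (!mc)
                    return 1;
            mc->num_of_cmds = ncmds;
            mc->cmds[0].opcode = 13;        /* SEND_STATUS, placeholder */
            mc->cmds[1].opcode = 13;

            fd = open("/dev/mmcblk0", O_RDWR);
            if (fd >= 0) {
                    ret = ioctl(fd, MMC_IOC_MULTI_CMD, mc);
                    if (ret)
                            perror("MMC_IOC_MULTI_CMD");
                    close(fd);
            }

            free(mc);
            return ret ? 1 : 0;
    }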
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 036f73bc54cd..e663627a8ef3 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -620,7 +620,8 @@ struct ovs_action_hash {
620 * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action. 620 * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
621 * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack 621 * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
622 * table. This allows future packets for the same connection to be identified 622 * table. This allows future packets for the same connection to be identified
623 * as 'established' or 'related'. 623 * as 'established' or 'related'. The flow key for the current packet will
624 * retain the pre-commit connection state.
624 * @OVS_CT_ATTR_ZONE: u16 connection tracking zone. 625 * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
625 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the 626 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
626 * mask, the corresponding bit in the value is copied to the connection 627 * mask, the corresponding bit in the value is copied to the connection
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 2881145cda86..651221334f49 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -168,6 +168,7 @@ enum perf_branch_sample_type_shift {
168 168
169 PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ 169 PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
170 PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ 170 PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
171 PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */
171 172
172 PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ 173 PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
173}; 174};
@@ -188,6 +189,7 @@ enum perf_branch_sample_type {
188 189
189 PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, 190 PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
190 PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, 191 PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
192 PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
191 193
192 PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, 194 PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
193}; 195};
@@ -476,7 +478,7 @@ struct perf_event_mmap_page {
476 * u64 delta; 478 * u64 delta;
477 * 479 *
478 * quot = (cyc >> time_shift); 480 * quot = (cyc >> time_shift);
479 * rem = cyc & ((1 << time_shift) - 1); 481 * rem = cyc & (((u64)1 << time_shift) - 1);
480 * delta = time_offset + quot * time_mult + 482 * delta = time_offset + quot * time_mult +
481 * ((rem * time_mult) >> time_shift); 483 * ((rem * time_mult) >> time_shift);
482 * 484 *
@@ -507,7 +509,7 @@ struct perf_event_mmap_page {
507 * And vice versa: 509 * And vice versa:
508 * 510 *
509 * quot = cyc >> time_shift; 511 * quot = cyc >> time_shift;
510 * rem = cyc & ((1 << time_shift) - 1); 512 * rem = cyc & (((u64)1 << time_shift) - 1);
511 * timestamp = time_zero + quot * time_mult + 513 * timestamp = time_zero + quot * time_mult +
512 * ((rem * time_mult) >> time_shift); 514 * ((rem * time_mult) >> time_shift);
513 */ 515 */
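
The cast matters: without it, 1 << time_shift is evaluated as a 32-bit int and the mask is wrong once time_shift reaches 31. A standalone sketch of the documented cycles-to-time conversion, with made-up field values (real ones come from the mmap'ed perf_event_mmap_page):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cyc_to_time(uint64_t cyc, uint64_t time_offset,
                                uint32_t time_mult, uint16_t time_shift)
    {
            uint64_t quot = cyc >> time_shift;
            uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

            return time_offset + quot * time_mult +
                   ((rem * time_mult) >> time_shift);
    }

    int main(void)
    {
            /* Hypothetical values, chosen only to exercise the formula. */
            printf("%llu\n", (unsigned long long)
                   cyc_to_time(123456789ULL, 0, 417045, 10));
            return 0;
    }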
diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
index 7530e7447620..8b8d39dfb67f 100644
--- a/include/uapi/linux/screen_info.h
+++ b/include/uapi/linux/screen_info.h
@@ -43,7 +43,8 @@ struct screen_info {
43 __u16 pages; /* 0x32 */ 43 __u16 pages; /* 0x32 */
44 __u16 vesa_attributes; /* 0x34 */ 44 __u16 vesa_attributes; /* 0x34 */
45 __u32 capabilities; /* 0x36 */ 45 __u32 capabilities; /* 0x36 */
46 __u8 _reserved[6]; /* 0x3a */ 46 __u32 ext_lfb_base; /* 0x3a */
47 __u8 _reserved[2]; /* 0x3e */
47} __attribute__((packed)); 48} __attribute__((packed));
48 49
49#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ 50#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
@@ -69,6 +70,6 @@ struct screen_info {
69#define VIDEO_FLAGS_NOCURSOR (1 << 0) /* The video mode has no cursor set */ 70#define VIDEO_FLAGS_NOCURSOR (1 << 0) /* The video mode has no cursor set */
70 71
71#define VIDEO_CAPABILITY_SKIP_QUIRKS (1 << 0) 72#define VIDEO_CAPABILITY_SKIP_QUIRKS (1 << 0)
72 73#define VIDEO_CAPABILITY_64BIT_BASE (1 << 1) /* Frame buffer base is 64-bit */
73 74
74#endif /* _UAPI_SCREEN_INFO_H */ 75#endif /* _UAPI_SCREEN_INFO_H */
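
A hedged consumer-side sketch: when the new capability bit is set, the full frame-buffer address is the old 32-bit lfb_base with ext_lfb_base supplying the upper 32 bits (lfb_base itself is outside the hunk shown here).

    #include <linux/screen_info.h>
    #include <linux/types.h>

    static u64 example_lfb_base(const struct screen_info *si)
    {
            u64 base = si->lfb_base;

            if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
                    base |= (u64)si->ext_lfb_base << 32;

            return base;
    }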
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 82cf9dff4295..85ff5e26e23b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -102,19 +102,6 @@ void get_online_cpus(void)
102} 102}
103EXPORT_SYMBOL_GPL(get_online_cpus); 103EXPORT_SYMBOL_GPL(get_online_cpus);
104 104
105bool try_get_online_cpus(void)
106{
107 if (cpu_hotplug.active_writer == current)
108 return true;
109 if (!mutex_trylock(&cpu_hotplug.lock))
110 return false;
111 cpuhp_lock_acquire_tryread();
112 atomic_inc(&cpu_hotplug.refcount);
113 mutex_unlock(&cpu_hotplug.lock);
114 return true;
115}
116EXPORT_SYMBOL_GPL(try_get_online_cpus);
117
118void put_online_cpus(void) 105void put_online_cpus(void)
119{ 106{
120 int refcount; 107 int refcount;
@@ -304,8 +291,8 @@ static inline void check_for_tasks(int dead_cpu)
304{ 291{
305 struct task_struct *g, *p; 292 struct task_struct *g, *p;
306 293
307 read_lock_irq(&tasklist_lock); 294 read_lock(&tasklist_lock);
308 do_each_thread(g, p) { 295 for_each_process_thread(g, p) {
309 if (!p->on_rq) 296 if (!p->on_rq)
310 continue; 297 continue;
311 /* 298 /*
@@ -320,8 +307,8 @@ static inline void check_for_tasks(int dead_cpu)
320 307
321 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n", 308 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
322 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags); 309 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
323 } while_each_thread(g, p); 310 }
324 read_unlock_irq(&tasklist_lock); 311 read_unlock(&tasklist_lock);
325} 312}
326 313
327struct take_cpu_down_param { 314struct take_cpu_down_param {
@@ -344,7 +331,7 @@ static int take_cpu_down(void *_param)
344 /* Give up timekeeping duties */ 331 /* Give up timekeeping duties */
345 tick_handover_do_timer(); 332 tick_handover_do_timer();
346 /* Park the stopper thread */ 333 /* Park the stopper thread */
347 kthread_park(current); 334 stop_machine_park((long)param->hcpu);
348 return 0; 335 return 0;
349} 336}
350 337
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b11756f9b6dc..ea02109aee77 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -196,7 +196,7 @@ static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
196static int perf_sample_allowed_ns __read_mostly = 196static int perf_sample_allowed_ns __read_mostly =
197 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100; 197 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
198 198
199void update_perf_cpu_limits(void) 199static void update_perf_cpu_limits(void)
200{ 200{
201 u64 tmp = perf_sample_period_ns; 201 u64 tmp = perf_sample_period_ns;
202 202
@@ -472,7 +472,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
472 * mode SWOUT : schedule out everything 472 * mode SWOUT : schedule out everything
473 * mode SWIN : schedule in based on cgroup for next 473 * mode SWIN : schedule in based on cgroup for next
474 */ 474 */
475void perf_cgroup_switch(struct task_struct *task, int mode) 475static void perf_cgroup_switch(struct task_struct *task, int mode)
476{ 476{
477 struct perf_cpu_context *cpuctx; 477 struct perf_cpu_context *cpuctx;
478 struct pmu *pmu; 478 struct pmu *pmu;
@@ -1939,7 +1939,7 @@ group_sched_in(struct perf_event *group_event,
1939 if (group_event->state == PERF_EVENT_STATE_OFF) 1939 if (group_event->state == PERF_EVENT_STATE_OFF)
1940 return 0; 1940 return 0;
1941 1941
1942 pmu->start_txn(pmu); 1942 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
1943 1943
1944 if (event_sched_in(group_event, cpuctx, ctx)) { 1944 if (event_sched_in(group_event, cpuctx, ctx)) {
1945 pmu->cancel_txn(pmu); 1945 pmu->cancel_txn(pmu);
@@ -3209,14 +3209,22 @@ void perf_event_exec(void)
3209 rcu_read_unlock(); 3209 rcu_read_unlock();
3210} 3210}
3211 3211
3212struct perf_read_data {
3213 struct perf_event *event;
3214 bool group;
3215 int ret;
3216};
3217
3212/* 3218/*
3213 * Cross CPU call to read the hardware event 3219 * Cross CPU call to read the hardware event
3214 */ 3220 */
3215static void __perf_event_read(void *info) 3221static void __perf_event_read(void *info)
3216{ 3222{
3217 struct perf_event *event = info; 3223 struct perf_read_data *data = info;
3224 struct perf_event *sub, *event = data->event;
3218 struct perf_event_context *ctx = event->ctx; 3225 struct perf_event_context *ctx = event->ctx;
3219 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 3226 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3227 struct pmu *pmu = event->pmu;
3220 3228
3221 /* 3229 /*
3222 * If this is a task context, we need to check whether it is 3230 * If this is a task context, we need to check whether it is
@@ -3233,9 +3241,35 @@ static void __perf_event_read(void *info)
3233 update_context_time(ctx); 3241 update_context_time(ctx);
3234 update_cgrp_time_from_event(event); 3242 update_cgrp_time_from_event(event);
3235 } 3243 }
3244
3236 update_event_times(event); 3245 update_event_times(event);
3237 if (event->state == PERF_EVENT_STATE_ACTIVE) 3246 if (event->state != PERF_EVENT_STATE_ACTIVE)
3238 event->pmu->read(event); 3247 goto unlock;
3248
3249 if (!data->group) {
3250 pmu->read(event);
3251 data->ret = 0;
3252 goto unlock;
3253 }
3254
3255 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3256
3257 pmu->read(event);
3258
3259 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3260 update_event_times(sub);
3261 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3262 /*
3263 * Use sibling's PMU rather than @event's since
 3264 * sibling could be on a different (e.g., software) PMU.
3265 */
3266 sub->pmu->read(sub);
3267 }
3268 }
3269
3270 data->ret = pmu->commit_txn(pmu);
3271
3272unlock:
3239 raw_spin_unlock(&ctx->lock); 3273 raw_spin_unlock(&ctx->lock);
3240} 3274}
3241 3275
@@ -3300,15 +3334,23 @@ u64 perf_event_read_local(struct perf_event *event)
3300 return val; 3334 return val;
3301} 3335}
3302 3336
3303static u64 perf_event_read(struct perf_event *event) 3337static int perf_event_read(struct perf_event *event, bool group)
3304{ 3338{
3339 int ret = 0;
3340
3305 /* 3341 /*
3306 * If event is enabled and currently active on a CPU, update the 3342 * If event is enabled and currently active on a CPU, update the
3307 * value in the event structure: 3343 * value in the event structure:
3308 */ 3344 */
3309 if (event->state == PERF_EVENT_STATE_ACTIVE) { 3345 if (event->state == PERF_EVENT_STATE_ACTIVE) {
3346 struct perf_read_data data = {
3347 .event = event,
3348 .group = group,
3349 .ret = 0,
3350 };
3310 smp_call_function_single(event->oncpu, 3351 smp_call_function_single(event->oncpu,
3311 __perf_event_read, event, 1); 3352 __perf_event_read, &data, 1);
3353 ret = data.ret;
3312 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3354 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3313 struct perf_event_context *ctx = event->ctx; 3355 struct perf_event_context *ctx = event->ctx;
3314 unsigned long flags; 3356 unsigned long flags;
@@ -3323,11 +3365,14 @@ static u64 perf_event_read(struct perf_event *event)
3323 update_context_time(ctx); 3365 update_context_time(ctx);
3324 update_cgrp_time_from_event(event); 3366 update_cgrp_time_from_event(event);
3325 } 3367 }
3326 update_event_times(event); 3368 if (group)
3369 update_group_times(event);
3370 else
3371 update_event_times(event);
3327 raw_spin_unlock_irqrestore(&ctx->lock, flags); 3372 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3328 } 3373 }
3329 3374
3330 return perf_event_count(event); 3375 return ret;
3331} 3376}
3332 3377
3333/* 3378/*
@@ -3769,7 +3814,7 @@ static void put_event(struct perf_event *event)
3769 * see the comment there. 3814 * see the comment there.
3770 * 3815 *
3771 * 2) there is a lock-inversion with mmap_sem through 3816 * 2) there is a lock-inversion with mmap_sem through
3772 * perf_event_read_group(), which takes faults while 3817 * perf_read_group(), which takes faults while
3773 * holding ctx->mutex, however this is called after 3818 * holding ctx->mutex, however this is called after
3774 * the last filedesc died, so there is no possibility 3819 * the last filedesc died, so there is no possibility
3775 * to trigger the AB-BA case. 3820 * to trigger the AB-BA case.
@@ -3843,14 +3888,18 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3843 *running = 0; 3888 *running = 0;
3844 3889
3845 mutex_lock(&event->child_mutex); 3890 mutex_lock(&event->child_mutex);
3846 total += perf_event_read(event); 3891
3892 (void)perf_event_read(event, false);
3893 total += perf_event_count(event);
3894
3847 *enabled += event->total_time_enabled + 3895 *enabled += event->total_time_enabled +
3848 atomic64_read(&event->child_total_time_enabled); 3896 atomic64_read(&event->child_total_time_enabled);
3849 *running += event->total_time_running + 3897 *running += event->total_time_running +
3850 atomic64_read(&event->child_total_time_running); 3898 atomic64_read(&event->child_total_time_running);
3851 3899
3852 list_for_each_entry(child, &event->child_list, child_list) { 3900 list_for_each_entry(child, &event->child_list, child_list) {
3853 total += perf_event_read(child); 3901 (void)perf_event_read(child, false);
3902 total += perf_event_count(child);
3854 *enabled += child->total_time_enabled; 3903 *enabled += child->total_time_enabled;
3855 *running += child->total_time_running; 3904 *running += child->total_time_running;
3856 } 3905 }
@@ -3860,55 +3909,95 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3860} 3909}
3861EXPORT_SYMBOL_GPL(perf_event_read_value); 3910EXPORT_SYMBOL_GPL(perf_event_read_value);
3862 3911
3863static int perf_event_read_group(struct perf_event *event, 3912static int __perf_read_group_add(struct perf_event *leader,
3864 u64 read_format, char __user *buf) 3913 u64 read_format, u64 *values)
3865{ 3914{
3866 struct perf_event *leader = event->group_leader, *sub; 3915 struct perf_event *sub;
3867 struct perf_event_context *ctx = leader->ctx; 3916 int n = 1; /* skip @nr */
3868 int n = 0, size = 0, ret; 3917 int ret;
3869 u64 count, enabled, running;
3870 u64 values[5];
3871 3918
3872 lockdep_assert_held(&ctx->mutex); 3919 ret = perf_event_read(leader, true);
3920 if (ret)
3921 return ret;
3873 3922
3874 count = perf_event_read_value(leader, &enabled, &running); 3923 /*
3924 * Since we co-schedule groups, {enabled,running} times of siblings
3925 * will be identical to those of the leader, so we only publish one
3926 * set.
3927 */
3928 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3929 values[n++] += leader->total_time_enabled +
3930 atomic64_read(&leader->child_total_time_enabled);
3931 }
3875 3932
3876 values[n++] = 1 + leader->nr_siblings; 3933 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3877 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3934 values[n++] += leader->total_time_running +
3878 values[n++] = enabled; 3935 atomic64_read(&leader->child_total_time_running);
3879 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3936 }
3880 values[n++] = running; 3937
3881 values[n++] = count; 3938 /*
3939 * Write {count,id} tuples for every sibling.
3940 */
3941 values[n++] += perf_event_count(leader);
3882 if (read_format & PERF_FORMAT_ID) 3942 if (read_format & PERF_FORMAT_ID)
3883 values[n++] = primary_event_id(leader); 3943 values[n++] = primary_event_id(leader);
3884 3944
3885 size = n * sizeof(u64); 3945 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3946 values[n++] += perf_event_count(sub);
3947 if (read_format & PERF_FORMAT_ID)
3948 values[n++] = primary_event_id(sub);
3949 }
3886 3950
3887 if (copy_to_user(buf, values, size)) 3951 return 0;
3888 return -EFAULT; 3952}
3953
3954static int perf_read_group(struct perf_event *event,
3955 u64 read_format, char __user *buf)
3956{
3957 struct perf_event *leader = event->group_leader, *child;
3958 struct perf_event_context *ctx = leader->ctx;
3959 int ret;
3960 u64 *values;
3889 3961
3890 ret = size; 3962 lockdep_assert_held(&ctx->mutex);
3891 3963
3892 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 3964 values = kzalloc(event->read_size, GFP_KERNEL);
3893 n = 0; 3965 if (!values)
3966 return -ENOMEM;
3894 3967
3895 values[n++] = perf_event_read_value(sub, &enabled, &running); 3968 values[0] = 1 + leader->nr_siblings;
3896 if (read_format & PERF_FORMAT_ID)
3897 values[n++] = primary_event_id(sub);
3898 3969
3899 size = n * sizeof(u64); 3970 /*
3971 * By locking the child_mutex of the leader we effectively
3972 * lock the child list of all siblings.. XXX explain how.
3973 */
3974 mutex_lock(&leader->child_mutex);
3900 3975
3901 if (copy_to_user(buf + ret, values, size)) { 3976 ret = __perf_read_group_add(leader, read_format, values);
3902 return -EFAULT; 3977 if (ret)
3903 } 3978 goto unlock;
3904 3979
3905 ret += size; 3980 list_for_each_entry(child, &leader->child_list, child_list) {
3981 ret = __perf_read_group_add(child, read_format, values);
3982 if (ret)
3983 goto unlock;
3906 } 3984 }
3907 3985
3986 mutex_unlock(&leader->child_mutex);
3987
3988 ret = event->read_size;
3989 if (copy_to_user(buf, values, event->read_size))
3990 ret = -EFAULT;
3991 goto out;
3992
3993unlock:
3994 mutex_unlock(&leader->child_mutex);
3995out:
3996 kfree(values);
3908 return ret; 3997 return ret;
3909} 3998}
3910 3999
3911static int perf_event_read_one(struct perf_event *event, 4000static int perf_read_one(struct perf_event *event,
3912 u64 read_format, char __user *buf) 4001 u64 read_format, char __user *buf)
3913{ 4002{
3914 u64 enabled, running; 4003 u64 enabled, running;
@@ -3946,7 +4035,7 @@ static bool is_event_hup(struct perf_event *event)
3946 * Read the performance event - simple non blocking version for now 4035 * Read the performance event - simple non blocking version for now
3947 */ 4036 */
3948static ssize_t 4037static ssize_t
3949perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 4038__perf_read(struct perf_event *event, char __user *buf, size_t count)
3950{ 4039{
3951 u64 read_format = event->attr.read_format; 4040 u64 read_format = event->attr.read_format;
3952 int ret; 4041 int ret;
@@ -3964,9 +4053,9 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3964 4053
3965 WARN_ON_ONCE(event->ctx->parent_ctx); 4054 WARN_ON_ONCE(event->ctx->parent_ctx);
3966 if (read_format & PERF_FORMAT_GROUP) 4055 if (read_format & PERF_FORMAT_GROUP)
3967 ret = perf_event_read_group(event, read_format, buf); 4056 ret = perf_read_group(event, read_format, buf);
3968 else 4057 else
3969 ret = perf_event_read_one(event, read_format, buf); 4058 ret = perf_read_one(event, read_format, buf);
3970 4059
3971 return ret; 4060 return ret;
3972} 4061}
@@ -3979,7 +4068,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3979 int ret; 4068 int ret;
3980 4069
3981 ctx = perf_event_ctx_lock(event); 4070 ctx = perf_event_ctx_lock(event);
3982 ret = perf_read_hw(event, buf, count); 4071 ret = __perf_read(event, buf, count);
3983 perf_event_ctx_unlock(event, ctx); 4072 perf_event_ctx_unlock(event, ctx);
3984 4073
3985 return ret; 4074 return ret;
@@ -4010,7 +4099,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
4010 4099
4011static void _perf_event_reset(struct perf_event *event) 4100static void _perf_event_reset(struct perf_event *event)
4012{ 4101{
4013 (void)perf_event_read(event); 4102 (void)perf_event_read(event, false);
4014 local64_set(&event->count, 0); 4103 local64_set(&event->count, 0);
4015 perf_event_update_userpage(event); 4104 perf_event_update_userpage(event);
4016} 4105}
@@ -7292,24 +7381,49 @@ static void perf_pmu_nop_void(struct pmu *pmu)
7292{ 7381{
7293} 7382}
7294 7383
7384static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
7385{
7386}
7387
7295static int perf_pmu_nop_int(struct pmu *pmu) 7388static int perf_pmu_nop_int(struct pmu *pmu)
7296{ 7389{
7297 return 0; 7390 return 0;
7298} 7391}
7299 7392
7300static void perf_pmu_start_txn(struct pmu *pmu) 7393static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
7394
7395static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
7301{ 7396{
7397 __this_cpu_write(nop_txn_flags, flags);
7398
7399 if (flags & ~PERF_PMU_TXN_ADD)
7400 return;
7401
7302 perf_pmu_disable(pmu); 7402 perf_pmu_disable(pmu);
7303} 7403}
7304 7404
7305static int perf_pmu_commit_txn(struct pmu *pmu) 7405static int perf_pmu_commit_txn(struct pmu *pmu)
7306{ 7406{
7407 unsigned int flags = __this_cpu_read(nop_txn_flags);
7408
7409 __this_cpu_write(nop_txn_flags, 0);
7410
7411 if (flags & ~PERF_PMU_TXN_ADD)
7412 return 0;
7413
7307 perf_pmu_enable(pmu); 7414 perf_pmu_enable(pmu);
7308 return 0; 7415 return 0;
7309} 7416}
7310 7417
7311static void perf_pmu_cancel_txn(struct pmu *pmu) 7418static void perf_pmu_cancel_txn(struct pmu *pmu)
7312{ 7419{
7420 unsigned int flags = __this_cpu_read(nop_txn_flags);
7421
7422 __this_cpu_write(nop_txn_flags, 0);
7423
7424 if (flags & ~PERF_PMU_TXN_ADD)
7425 return;
7426
7313 perf_pmu_enable(pmu); 7427 perf_pmu_enable(pmu);
7314} 7428}
7315 7429
@@ -7548,7 +7662,7 @@ got_cpu_context:
7548 pmu->commit_txn = perf_pmu_commit_txn; 7662 pmu->commit_txn = perf_pmu_commit_txn;
7549 pmu->cancel_txn = perf_pmu_cancel_txn; 7663 pmu->cancel_txn = perf_pmu_cancel_txn;
7550 } else { 7664 } else {
7551 pmu->start_txn = perf_pmu_nop_void; 7665 pmu->start_txn = perf_pmu_nop_txn;
7552 pmu->commit_txn = perf_pmu_nop_int; 7666 pmu->commit_txn = perf_pmu_nop_int;
7553 pmu->cancel_txn = perf_pmu_nop_void; 7667 pmu->cancel_txn = perf_pmu_nop_void;
7554 } 7668 }
@@ -7636,7 +7750,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
7636 return ret; 7750 return ret;
7637} 7751}
7638 7752
7639struct pmu *perf_init_event(struct perf_event *event) 7753static struct pmu *perf_init_event(struct perf_event *event)
7640{ 7754{
7641 struct pmu *pmu = NULL; 7755 struct pmu *pmu = NULL;
7642 int idx; 7756 int idx;
@@ -9345,14 +9459,6 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css,
9345 struct cgroup_subsys_state *old_css, 9459 struct cgroup_subsys_state *old_css,
9346 struct task_struct *task) 9460 struct task_struct *task)
9347{ 9461{
9348 /*
9349 * cgroup_exit() is called in the copy_process() failure path.
9350 * Ignore this case since the task hasn't ran yet, this avoids
9351 * trying to poke a half freed task state from generic code.
9352 */
9353 if (!(task->flags & PF_EXITING))
9354 return;
9355
9356 task_function_call(task, __perf_cgroup_move, task); 9462 task_function_call(task, __perf_cgroup_move, task);
9357} 9463}
9358 9464
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 182bc30899d5..b5d1ea79c595 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -141,7 +141,7 @@ int perf_output_begin(struct perf_output_handle *handle,
141 perf_output_get_handle(handle); 141 perf_output_get_handle(handle);
142 142
143 do { 143 do {
144 tail = READ_ONCE_CTRL(rb->user_page->data_tail); 144 tail = READ_ONCE(rb->user_page->data_tail);
145 offset = head = local_read(&rb->head); 145 offset = head = local_read(&rb->head);
146 if (!rb->overwrite && 146 if (!rb->overwrite &&
147 unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size)) 147 unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
diff --git a/kernel/exit.c b/kernel/exit.c
index ea95ee1b5ef7..07110c6020a0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -706,10 +706,12 @@ void do_exit(long code)
706 smp_mb(); 706 smp_mb();
707 raw_spin_unlock_wait(&tsk->pi_lock); 707 raw_spin_unlock_wait(&tsk->pi_lock);
708 708
709 if (unlikely(in_atomic())) 709 if (unlikely(in_atomic())) {
710 pr_info("note: %s[%d] exited with preempt_count %d\n", 710 pr_info("note: %s[%d] exited with preempt_count %d\n",
711 current->comm, task_pid_nr(current), 711 current->comm, task_pid_nr(current),
712 preempt_count()); 712 preempt_count());
713 preempt_count_set(PREEMPT_ENABLED);
714 }
713 715
714 /* sync mm's RSS info before statistics gathering */ 716 /* sync mm's RSS info before statistics gathering */
715 if (tsk->mm) 717 if (tsk->mm)
@@ -761,7 +763,9 @@ void do_exit(long code)
761 */ 763 */
762 flush_ptrace_hw_breakpoint(tsk); 764 flush_ptrace_hw_breakpoint(tsk);
763 765
766 TASKS_RCU(preempt_disable());
764 TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu)); 767 TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
768 TASKS_RCU(preempt_enable());
765 exit_notify(tsk, group_dead); 769 exit_notify(tsk, group_dead);
766 proc_exit_connector(tsk); 770 proc_exit_connector(tsk);
767#ifdef CONFIG_NUMA 771#ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index 2845623fb582..6ac894244d39 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1101,7 +1101,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
1101 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 1101 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1102 if (cpu_limit != RLIM_INFINITY) { 1102 if (cpu_limit != RLIM_INFINITY) {
1103 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); 1103 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
1104 sig->cputimer.running = 1; 1104 sig->cputimer.running = true;
1105 } 1105 }
1106 1106
1107 /* The timer lists. */ 1107 /* The timer lists. */
diff --git a/kernel/futex.c b/kernel/futex.c
index 6e443efc65f4..dfc86e93c31d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -255,9 +255,18 @@ struct futex_hash_bucket {
255 struct plist_head chain; 255 struct plist_head chain;
256} ____cacheline_aligned_in_smp; 256} ____cacheline_aligned_in_smp;
257 257
258static unsigned long __read_mostly futex_hashsize; 258/*
259 * The base of the bucket array and its size are always used together
260 * (after initialization only in hash_futex()), so ensure that they
261 * reside in the same cacheline.
262 */
263static struct {
264 struct futex_hash_bucket *queues;
265 unsigned long hashsize;
266} __futex_data __read_mostly __aligned(2*sizeof(long));
267#define futex_queues (__futex_data.queues)
268#define futex_hashsize (__futex_data.hashsize)
259 269
260static struct futex_hash_bucket *futex_queues;
261 270
262/* 271/*
263 * Fault injections for futexes. 272 * Fault injections for futexes.
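
The anonymous struct is purely about layout: the two values the hashing code always reads together now share a cache line, and the #defines keep every existing call site spelled the same way. A generic sketch of the same pattern with hypothetical names (the hash-and-mask lookup mirrors the usual bucket indexing, which this hunk does not show):

    #include <linux/cache.h>
    #include <linux/spinlock.h>

    struct example_bucket {
            spinlock_t lock;
    };

    /* Pack the array base and its size into one cache-line-aligned object. */
    static struct {
            struct example_bucket *queues;
            unsigned long hashsize;
    } __example_data __read_mostly __aligned(2 * sizeof(long));
    #define example_queues   (__example_data.queues)
    #define example_hashsize (__example_data.hashsize)

    static inline struct example_bucket *example_hash_bucket(unsigned long hash)
    {
            return &example_queues[hash & (example_hashsize - 1)];
    }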
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 9a76e3beda54..3b48dab80164 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -30,6 +30,10 @@ config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
30config GENERIC_PENDING_IRQ 30config GENERIC_PENDING_IRQ
31 bool 31 bool
32 32
33# Support for generic irq migrating off cpu before the cpu is offline.
34config GENERIC_IRQ_MIGRATION
35 bool
36
33# Alpha specific irq affinity mechanism 37# Alpha specific irq affinity mechanism
34config AUTO_IRQ_AFFINITY 38config AUTO_IRQ_AFFINITY
35 bool 39 bool
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index d12123526e2b..2fc9cbdf35b6 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
5obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o 5obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
6obj-$(CONFIG_PROC_FS) += proc.o 6obj-$(CONFIG_PROC_FS) += proc.o
7obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 7obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
8obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
8obj-$(CONFIG_PM_SLEEP) += pm.o 9obj-$(CONFIG_PM_SLEEP) += pm.o
9obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o 10obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e28169dd1c36..15206453b12a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -21,6 +21,20 @@
21 21
22#include "internals.h" 22#include "internals.h"
23 23
24static irqreturn_t bad_chained_irq(int irq, void *dev_id)
25{
26 WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
27 return IRQ_NONE;
28}
29
30/*
 31 * Chained handlers should never call an action on their IRQ. This default
 32 * action will emit a warning if that ever happens.
33 */
34struct irqaction chained_action = {
35 .handler = bad_chained_irq,
36};
37
24/** 38/**
25 * irq_set_chip - set the irq chip for an irq 39 * irq_set_chip - set the irq chip for an irq
26 * @irq: irq number 40 * @irq: irq number
@@ -227,6 +241,13 @@ void irq_enable(struct irq_desc *desc)
227 * disabled. If an interrupt happens, then the interrupt flow 241 * disabled. If an interrupt happens, then the interrupt flow
228 * handler masks the line at the hardware level and marks it 242 * handler masks the line at the hardware level and marks it
229 * pending. 243 * pending.
244 *
245 * If the interrupt chip does not implement the irq_disable callback,
246 * a driver can disable the lazy approach for a particular irq line by
247 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
248 * be used for devices which cannot disable the interrupt at the
249 * device level under certain circumstances and have to use
250 * disable_irq[_nosync] instead.
230 */ 251 */
231void irq_disable(struct irq_desc *desc) 252void irq_disable(struct irq_desc *desc)
232{ 253{
@@ -234,6 +255,8 @@ void irq_disable(struct irq_desc *desc)
234 if (desc->irq_data.chip->irq_disable) { 255 if (desc->irq_data.chip->irq_disable) {
235 desc->irq_data.chip->irq_disable(&desc->irq_data); 256 desc->irq_data.chip->irq_disable(&desc->irq_data);
236 irq_state_set_masked(desc); 257 irq_state_set_masked(desc);
258 } else if (irq_settings_disable_unlazy(desc)) {
259 mask_irq(desc);
237 } 260 }
238} 261}
239 262
@@ -669,7 +692,7 @@ void handle_percpu_irq(struct irq_desc *desc)
669 if (chip->irq_ack) 692 if (chip->irq_ack)
670 chip->irq_ack(&desc->irq_data); 693 chip->irq_ack(&desc->irq_data);
671 694
672 handle_irq_event_percpu(desc, desc->action); 695 handle_irq_event_percpu(desc);
673 696
674 if (chip->irq_eoi) 697 if (chip->irq_eoi)
675 chip->irq_eoi(&desc->irq_data); 698 chip->irq_eoi(&desc->irq_data);
@@ -746,6 +769,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
746 if (desc->irq_data.chip != &no_irq_chip) 769 if (desc->irq_data.chip != &no_irq_chip)
747 mask_ack_irq(desc); 770 mask_ack_irq(desc);
748 irq_state_set_disabled(desc); 771 irq_state_set_disabled(desc);
772 if (is_chained)
773 desc->action = NULL;
749 desc->depth = 1; 774 desc->depth = 1;
750 } 775 }
751 desc->handle_irq = handle; 776 desc->handle_irq = handle;
@@ -755,6 +780,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
755 irq_settings_set_noprobe(desc); 780 irq_settings_set_noprobe(desc);
756 irq_settings_set_norequest(desc); 781 irq_settings_set_norequest(desc);
757 irq_settings_set_nothread(desc); 782 irq_settings_set_nothread(desc);
783 desc->action = &chained_action;
758 irq_startup(desc, true); 784 irq_startup(desc, true);
759 } 785 }
760} 786}
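
A hedged driver-side sketch of the opt-out described in the new irq_disable() comment: after requesting the line, the driver clears the lazy behaviour so a later disable_irq() masks the interrupt at the chip immediately. Device and handler names are hypothetical.

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static int example_setup_irq(unsigned int irq, irq_handler_t handler, void *dev)
    {
            int ret;

            ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, "example-dev", dev);
            if (ret)
                    return ret;

            /*
             * The device cannot gate its own interrupt source, so make
             * disable_irq() mask the line at the irqchip right away
             * instead of lazily on the next interrupt.
             */
            irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
            return 0;
    }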
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
new file mode 100644
index 000000000000..80f4f4e56fed
--- /dev/null
+++ b/kernel/irq/cpuhotplug.c
@@ -0,0 +1,82 @@
1/*
2 * Generic cpu hotunplug interrupt migration code copied from the
3 * arch/arm implementation
4 *
5 * Copyright (C) Russell King
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/interrupt.h>
12#include <linux/ratelimit.h>
13#include <linux/irq.h>
14
15#include "internals.h"
16
17static bool migrate_one_irq(struct irq_desc *desc)
18{
19 struct irq_data *d = irq_desc_get_irq_data(desc);
20 const struct cpumask *affinity = d->common->affinity;
21 struct irq_chip *c;
22 bool ret = false;
23
24 /*
25 * If this is a per-CPU interrupt, or the affinity does not
26 * include this CPU, then we have nothing to do.
27 */
28 if (irqd_is_per_cpu(d) ||
29 !cpumask_test_cpu(smp_processor_id(), affinity))
30 return false;
31
32 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
33 affinity = cpu_online_mask;
34 ret = true;
35 }
36
37 c = irq_data_get_irq_chip(d);
38 if (!c->irq_set_affinity) {
39 pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);
40 } else {
41 int r = irq_do_set_affinity(d, affinity, false);
42 if (r)
43 pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
44 d->irq, r);
45 }
46
47 return ret;
48}
49
50/**
51 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
52 *
53 * The current CPU has been marked offline. Migrate IRQs off this CPU.
54 * If the affinity settings do not allow other CPUs, force them onto any
55 * available CPU.
56 *
57 * Note: we must iterate over all IRQs, whether they have an attached
58 * action structure or not, as we need to get chained interrupts too.
59 */
60void irq_migrate_all_off_this_cpu(void)
61{
62 unsigned int irq;
63 struct irq_desc *desc;
64 unsigned long flags;
65
66 local_irq_save(flags);
67
68 for_each_active_irq(irq) {
69 bool affinity_broken;
70
71 desc = irq_to_desc(irq);
72 raw_spin_lock(&desc->lock);
73 affinity_broken = migrate_one_irq(desc);
74 raw_spin_unlock(&desc->lock);
75
76 if (affinity_broken)
77 pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
78 irq, smp_processor_id());
79 }
80
81 local_irq_restore(flags);
82}
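
A hedged sketch of the intended caller: an architecture that selects GENERIC_IRQ_MIGRATION invokes the helper from its CPU-offline hook once the CPU is marked offline, much like arch/arm's __cpu_disable(). The function name below is illustrative.

    #include <linux/irq.h>
    #include <linux/smp.h>
    #include <linux/cpumask.h>

    int example_cpu_disable(void)
    {
            unsigned int cpu = smp_processor_id();

            set_cpu_online(cpu, false);

            /* Route every IRQ still targeting this CPU somewhere else. */
            irq_migrate_all_off_this_cpu();

            return 0;
    }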
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e25a83b67cce..a302cf9a2126 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -132,11 +132,11 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
132 wake_up_process(action->thread); 132 wake_up_process(action->thread);
133} 133}
134 134
135irqreturn_t 135irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
136handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
137{ 136{
138 irqreturn_t retval = IRQ_NONE; 137 irqreturn_t retval = IRQ_NONE;
139 unsigned int flags = 0, irq = desc->irq_data.irq; 138 unsigned int flags = 0, irq = desc->irq_data.irq;
139 struct irqaction *action = desc->action;
140 140
141 do { 141 do {
142 irqreturn_t res; 142 irqreturn_t res;
@@ -184,14 +184,13 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
184 184
185irqreturn_t handle_irq_event(struct irq_desc *desc) 185irqreturn_t handle_irq_event(struct irq_desc *desc)
186{ 186{
187 struct irqaction *action = desc->action;
188 irqreturn_t ret; 187 irqreturn_t ret;
189 188
190 desc->istate &= ~IRQS_PENDING; 189 desc->istate &= ~IRQS_PENDING;
191 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); 190 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
192 raw_spin_unlock(&desc->lock); 191 raw_spin_unlock(&desc->lock);
193 192
194 ret = handle_irq_event_percpu(desc, action); 193 ret = handle_irq_event_percpu(desc);
195 194
196 raw_spin_lock(&desc->lock); 195 raw_spin_lock(&desc->lock);
197 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 196 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 5ef0c2dbe930..05c2188271b8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,6 +18,8 @@
18 18
19extern bool noirqdebug; 19extern bool noirqdebug;
20 20
21extern struct irqaction chained_action;
22
21/* 23/*
22 * Bits used by threaded handlers: 24 * Bits used by threaded handlers:
23 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run 25 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
@@ -81,7 +83,7 @@ extern void irq_mark_irq(unsigned int irq);
81 83
82extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 84extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
83 85
84irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); 86irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
85irqreturn_t handle_irq_event(struct irq_desc *desc); 87irqreturn_t handle_irq_event(struct irq_desc *desc);
86 88
87/* Resending of interrupts :*/ 89/* Resending of interrupts :*/
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index dc9d27c0c158..22aa9612ef7c 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -27,6 +27,57 @@ static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
27 irq_hw_number_t hwirq, int node); 27 irq_hw_number_t hwirq, int node);
28static void irq_domain_check_hierarchy(struct irq_domain *domain); 28static void irq_domain_check_hierarchy(struct irq_domain *domain);
29 29
30struct irqchip_fwid {
31 struct fwnode_handle fwnode;
32 char *name;
33 void *data;
34};
35
36/**
37 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
38 * identifying an irq domain
39 * @data: optional user-provided data
40 *
 41 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
42 * fwnode_handle (or NULL on failure).
43 */
44struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
45{
46 struct irqchip_fwid *fwid;
47 char *name;
48
49 fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
50 name = kasprintf(GFP_KERNEL, "irqchip@%p", data);
51
52 if (!fwid || !name) {
53 kfree(fwid);
54 kfree(name);
55 return NULL;
56 }
57
58 fwid->name = name;
59 fwid->data = data;
60 fwid->fwnode.type = FWNODE_IRQCHIP;
61 return &fwid->fwnode;
62}
63
64/**
65 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
66 *
67 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
68 */
69void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
70{
71 struct irqchip_fwid *fwid;
72
73 if (WARN_ON(fwnode->type != FWNODE_IRQCHIP))
74 return;
75
76 fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
77 kfree(fwid->name);
78 kfree(fwid);
79}
80
30/** 81/**
31 * __irq_domain_add() - Allocate a new irq_domain data structure 82 * __irq_domain_add() - Allocate a new irq_domain data structure
32 * @of_node: optional device-tree node of the interrupt controller 83 * @of_node: optional device-tree node of the interrupt controller
@@ -40,23 +91,28 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain);
 40 * Allocates and initializes an irq_domain structure. 91 * Allocates and initializes an irq_domain structure.
41 * Returns pointer to IRQ domain, or NULL on failure. 92 * Returns pointer to IRQ domain, or NULL on failure.
42 */ 93 */
43struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 94struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
44 irq_hw_number_t hwirq_max, int direct_max, 95 irq_hw_number_t hwirq_max, int direct_max,
45 const struct irq_domain_ops *ops, 96 const struct irq_domain_ops *ops,
46 void *host_data) 97 void *host_data)
47{ 98{
48 struct irq_domain *domain; 99 struct irq_domain *domain;
100 struct device_node *of_node;
101
102 of_node = to_of_node(fwnode);
49 103
50 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), 104 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
51 GFP_KERNEL, of_node_to_nid(of_node)); 105 GFP_KERNEL, of_node_to_nid(of_node));
52 if (WARN_ON(!domain)) 106 if (WARN_ON(!domain))
53 return NULL; 107 return NULL;
54 108
109 of_node_get(of_node);
110
55 /* Fill structure */ 111 /* Fill structure */
56 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); 112 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
57 domain->ops = ops; 113 domain->ops = ops;
58 domain->host_data = host_data; 114 domain->host_data = host_data;
59 domain->of_node = of_node_get(of_node); 115 domain->fwnode = fwnode;
60 domain->hwirq_max = hwirq_max; 116 domain->hwirq_max = hwirq_max;
61 domain->revmap_size = size; 117 domain->revmap_size = size;
62 domain->revmap_direct_max_irq = direct_max; 118 domain->revmap_direct_max_irq = direct_max;
@@ -102,7 +158,7 @@ void irq_domain_remove(struct irq_domain *domain)
102 158
103 pr_debug("Removed domain %s\n", domain->name); 159 pr_debug("Removed domain %s\n", domain->name);
104 160
105 of_node_put(domain->of_node); 161 of_node_put(irq_domain_get_of_node(domain));
106 kfree(domain); 162 kfree(domain);
107} 163}
108EXPORT_SYMBOL_GPL(irq_domain_remove); 164EXPORT_SYMBOL_GPL(irq_domain_remove);
@@ -133,7 +189,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
133{ 189{
134 struct irq_domain *domain; 190 struct irq_domain *domain;
135 191
136 domain = __irq_domain_add(of_node, size, size, 0, ops, host_data); 192 domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
137 if (!domain) 193 if (!domain)
138 return NULL; 194 return NULL;
139 195
@@ -177,7 +233,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
177{ 233{
178 struct irq_domain *domain; 234 struct irq_domain *domain;
179 235
180 domain = __irq_domain_add(of_node, first_hwirq + size, 236 domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
181 first_hwirq + size, 0, ops, host_data); 237 first_hwirq + size, 0, ops, host_data);
182 if (domain) 238 if (domain)
183 irq_domain_associate_many(domain, first_irq, first_hwirq, size); 239 irq_domain_associate_many(domain, first_irq, first_hwirq, size);
@@ -187,12 +243,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
187EXPORT_SYMBOL_GPL(irq_domain_add_legacy); 243EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
188 244
189/** 245/**
190 * irq_find_matching_host() - Locates a domain for a given device node 246 * irq_find_matching_fwnode() - Locates a domain for a given fwnode
191 * @node: device-tree node of the interrupt controller 247 * @fwnode: FW descriptor of the interrupt controller
192 * @bus_token: domain-specific data 248 * @bus_token: domain-specific data
193 */ 249 */
194struct irq_domain *irq_find_matching_host(struct device_node *node, 250struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
195 enum irq_domain_bus_token bus_token) 251 enum irq_domain_bus_token bus_token)
196{ 252{
197 struct irq_domain *h, *found = NULL; 253 struct irq_domain *h, *found = NULL;
198 int rc; 254 int rc;
@@ -209,9 +265,9 @@ struct irq_domain *irq_find_matching_host(struct device_node *node,
209 mutex_lock(&irq_domain_mutex); 265 mutex_lock(&irq_domain_mutex);
210 list_for_each_entry(h, &irq_domain_list, link) { 266 list_for_each_entry(h, &irq_domain_list, link) {
211 if (h->ops->match) 267 if (h->ops->match)
212 rc = h->ops->match(h, node, bus_token); 268 rc = h->ops->match(h, to_of_node(fwnode), bus_token);
213 else 269 else
214 rc = ((h->of_node != NULL) && (h->of_node == node) && 270 rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
215 ((bus_token == DOMAIN_BUS_ANY) || 271 ((bus_token == DOMAIN_BUS_ANY) ||
216 (h->bus_token == bus_token))); 272 (h->bus_token == bus_token)));
217 273
@@ -223,7 +279,7 @@ struct irq_domain *irq_find_matching_host(struct device_node *node,
223 mutex_unlock(&irq_domain_mutex); 279 mutex_unlock(&irq_domain_mutex);
224 return found; 280 return found;
225} 281}
226EXPORT_SYMBOL_GPL(irq_find_matching_host); 282EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
227 283
228/** 284/**
229 * irq_set_default_host() - Set a "default" irq domain 285 * irq_set_default_host() - Set a "default" irq domain
@@ -336,10 +392,12 @@ EXPORT_SYMBOL_GPL(irq_domain_associate);
336void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, 392void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
337 irq_hw_number_t hwirq_base, int count) 393 irq_hw_number_t hwirq_base, int count)
338{ 394{
395 struct device_node *of_node;
339 int i; 396 int i;
340 397
398 of_node = irq_domain_get_of_node(domain);
341 pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__, 399 pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
342 of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count); 400 of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
343 401
344 for (i = 0; i < count; i++) { 402 for (i = 0; i < count; i++) {
345 irq_domain_associate(domain, irq_base + i, hwirq_base + i); 403 irq_domain_associate(domain, irq_base + i, hwirq_base + i);
@@ -359,12 +417,14 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
359 */ 417 */
360unsigned int irq_create_direct_mapping(struct irq_domain *domain) 418unsigned int irq_create_direct_mapping(struct irq_domain *domain)
361{ 419{
420 struct device_node *of_node;
362 unsigned int virq; 421 unsigned int virq;
363 422
364 if (domain == NULL) 423 if (domain == NULL)
365 domain = irq_default_domain; 424 domain = irq_default_domain;
366 425
367 virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); 426 of_node = irq_domain_get_of_node(domain);
427 virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
368 if (!virq) { 428 if (!virq) {
369 pr_debug("create_direct virq allocation failed\n"); 429 pr_debug("create_direct virq allocation failed\n");
370 return 0; 430 return 0;
@@ -399,6 +459,7 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
399unsigned int irq_create_mapping(struct irq_domain *domain, 459unsigned int irq_create_mapping(struct irq_domain *domain,
400 irq_hw_number_t hwirq) 460 irq_hw_number_t hwirq)
401{ 461{
462 struct device_node *of_node;
402 int virq; 463 int virq;
403 464
404 pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); 465 pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
@@ -412,6 +473,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
412 } 473 }
413 pr_debug("-> using domain @%p\n", domain); 474 pr_debug("-> using domain @%p\n", domain);
414 475
476 of_node = irq_domain_get_of_node(domain);
477
415 /* Check if mapping already exists */ 478 /* Check if mapping already exists */
416 virq = irq_find_mapping(domain, hwirq); 479 virq = irq_find_mapping(domain, hwirq);
417 if (virq) { 480 if (virq) {
@@ -420,8 +483,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
420 } 483 }
421 484
422 /* Allocate a virtual interrupt number */ 485 /* Allocate a virtual interrupt number */
423 virq = irq_domain_alloc_descs(-1, 1, hwirq, 486 virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
424 of_node_to_nid(domain->of_node));
425 if (virq <= 0) { 487 if (virq <= 0) {
426 pr_debug("-> virq allocation failed\n"); 488 pr_debug("-> virq allocation failed\n");
427 return 0; 489 return 0;
@@ -433,7 +495,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
433 } 495 }
434 496
435 pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", 497 pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
436 hwirq, of_node_full_name(domain->of_node), virq); 498 hwirq, of_node_full_name(of_node), virq);
437 499
438 return virq; 500 return virq;
439} 501}
@@ -460,10 +522,12 @@ EXPORT_SYMBOL_GPL(irq_create_mapping);
460int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, 522int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
461 irq_hw_number_t hwirq_base, int count) 523 irq_hw_number_t hwirq_base, int count)
462{ 524{
525 struct device_node *of_node;
463 int ret; 526 int ret;
464 527
528 of_node = irq_domain_get_of_node(domain);
465 ret = irq_alloc_descs(irq_base, irq_base, count, 529 ret = irq_alloc_descs(irq_base, irq_base, count,
466 of_node_to_nid(domain->of_node)); 530 of_node_to_nid(of_node));
467 if (unlikely(ret < 0)) 531 if (unlikely(ret < 0))
468 return ret; 532 return ret;
469 533
@@ -472,28 +536,56 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
472} 536}
473EXPORT_SYMBOL_GPL(irq_create_strict_mappings); 537EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
474 538
475unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) 539static int irq_domain_translate(struct irq_domain *d,
540 struct irq_fwspec *fwspec,
541 irq_hw_number_t *hwirq, unsigned int *type)
542{
543#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
544 if (d->ops->translate)
545 return d->ops->translate(d, fwspec, hwirq, type);
546#endif
547 if (d->ops->xlate)
548 return d->ops->xlate(d, to_of_node(fwspec->fwnode),
549 fwspec->param, fwspec->param_count,
550 hwirq, type);
551
552 /* If domain has no translation, then we assume interrupt line */
553 *hwirq = fwspec->param[0];
554 return 0;
555}
556
557static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
558 struct irq_fwspec *fwspec)
559{
560 int i;
561
562 fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
563 fwspec->param_count = irq_data->args_count;
564
565 for (i = 0; i < irq_data->args_count; i++)
566 fwspec->param[i] = irq_data->args[i];
567}
568
569unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
476{ 570{
477 struct irq_domain *domain; 571 struct irq_domain *domain;
478 irq_hw_number_t hwirq; 572 irq_hw_number_t hwirq;
479 unsigned int type = IRQ_TYPE_NONE; 573 unsigned int type = IRQ_TYPE_NONE;
480 int virq; 574 int virq;
481 575
482 domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain; 576 if (fwspec->fwnode)
577 domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY);
578 else
579 domain = irq_default_domain;
580
483 if (!domain) { 581 if (!domain) {
484 pr_warn("no irq domain found for %s !\n", 582 pr_warn("no irq domain found for %s !\n",
485 of_node_full_name(irq_data->np)); 583 of_node_full_name(to_of_node(fwspec->fwnode)));
486 return 0; 584 return 0;
487 } 585 }
488 586
489 /* If domain has no translation, then we assume interrupt line */ 587 if (irq_domain_translate(domain, fwspec, &hwirq, &type))
490 if (domain->ops->xlate == NULL) 588 return 0;
491 hwirq = irq_data->args[0];
492 else {
493 if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
494 irq_data->args_count, &hwirq, &type))
495 return 0;
496 }
497 589
498 if (irq_domain_is_hierarchy(domain)) { 590 if (irq_domain_is_hierarchy(domain)) {
499 /* 591 /*
@@ -504,7 +596,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
504 if (virq) 596 if (virq)
505 return virq; 597 return virq;
506 598
507 virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data); 599 virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
508 if (virq <= 0) 600 if (virq <= 0)
509 return 0; 601 return 0;
510 } else { 602 } else {
@@ -520,6 +612,15 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
520 irq_set_irq_type(virq, type); 612 irq_set_irq_type(virq, type);
521 return virq; 613 return virq;
522} 614}
615EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
616
617unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
618{
619 struct irq_fwspec fwspec;
620
621 of_phandle_args_to_fwspec(irq_data, &fwspec);
622 return irq_create_fwspec_mapping(&fwspec);
623}
523EXPORT_SYMBOL_GPL(irq_create_of_mapping); 624EXPORT_SYMBOL_GPL(irq_create_of_mapping);
524 625
525/** 626/**
@@ -590,14 +691,16 @@ static int virq_debug_show(struct seq_file *m, void *private)
590 "name", "mapped", "linear-max", "direct-max", "devtree-node"); 691 "name", "mapped", "linear-max", "direct-max", "devtree-node");
591 mutex_lock(&irq_domain_mutex); 692 mutex_lock(&irq_domain_mutex);
592 list_for_each_entry(domain, &irq_domain_list, link) { 693 list_for_each_entry(domain, &irq_domain_list, link) {
694 struct device_node *of_node;
593 int count = 0; 695 int count = 0;
696 of_node = irq_domain_get_of_node(domain);
594 radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0) 697 radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
595 count++; 698 count++;
596 seq_printf(m, "%c%-16s %6u %10u %10u %s\n", 699 seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
597 domain == irq_default_domain ? '*' : ' ', domain->name, 700 domain == irq_default_domain ? '*' : ' ', domain->name,
598 domain->revmap_size + count, domain->revmap_size, 701 domain->revmap_size + count, domain->revmap_size,
599 domain->revmap_direct_max_irq, 702 domain->revmap_direct_max_irq,
600 domain->of_node ? of_node_full_name(domain->of_node) : ""); 703 of_node ? of_node_full_name(of_node) : "");
601 } 704 }
602 mutex_unlock(&irq_domain_mutex); 705 mutex_unlock(&irq_domain_mutex);
603 706
@@ -751,11 +854,11 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt,
751 854
752#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 855#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
753/** 856/**
754 * irq_domain_add_hierarchy - Add an irqdomain into the hierarchy 857 * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
755 * @parent: Parent irq domain to associate with the new domain 858 * @parent: Parent irq domain to associate with the new domain
756 * @flags: Irq domain flags associated with the domain 859 * @flags: Irq domain flags associated with the domain
757 * @size: Size of the domain. See below 860 * @size: Size of the domain. See below
758 * @node: Optional device-tree node of the interrupt controller 861 * @fwnode: Optional fwnode of the interrupt controller
759 * @ops: Pointer to the interrupt domain callbacks 862 * @ops: Pointer to the interrupt domain callbacks
760 * @host_data: Controller private data pointer 863 * @host_data: Controller private data pointer
761 * 864 *
@@ -765,19 +868,19 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt,
765 * domain flags are set. 868 * domain flags are set.
766 * Returns pointer to IRQ domain, or NULL on failure. 869 * Returns pointer to IRQ domain, or NULL on failure.
767 */ 870 */
768struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, 871struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
769 unsigned int flags, 872 unsigned int flags,
770 unsigned int size, 873 unsigned int size,
771 struct device_node *node, 874 struct fwnode_handle *fwnode,
772 const struct irq_domain_ops *ops, 875 const struct irq_domain_ops *ops,
773 void *host_data) 876 void *host_data)
774{ 877{
775 struct irq_domain *domain; 878 struct irq_domain *domain;
776 879
777 if (size) 880 if (size)
778 domain = irq_domain_add_linear(node, size, ops, host_data); 881 domain = irq_domain_create_linear(fwnode, size, ops, host_data);
779 else 882 else
780 domain = irq_domain_add_tree(node, ops, host_data); 883 domain = irq_domain_create_tree(fwnode, ops, host_data);
781 if (domain) { 884 if (domain) {
782 domain->parent = parent; 885 domain->parent = parent;
783 domain->flags |= flags; 886 domain->flags |= flags;
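
For context, a minimal usage sketch (illustration only, not part of this patch) of the new irq_fwspec path: a caller holding a device-tree node fills in the fwspec by hand, exactly as of_phandle_args_to_fwspec() does above, and lets irq_create_fwspec_mapping() locate the matching domain. The hwirq number and the two-cell parameter layout below are assumptions made for the example.

/* Hypothetical caller; assumes <linux/irqdomain.h> and <linux/of.h>. */
static unsigned int example_map_hwirq(struct device_node *np)
{
        struct irq_fwspec fwspec;

        fwspec.fwnode = np ? &np->fwnode : NULL;   /* fwnode of the irqchip */
        fwspec.param_count = 2;                    /* assumed two-cell binding */
        fwspec.param[0] = 10;                      /* hwirq, made up for the example */
        fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;     /* trigger type */

        return irq_create_fwspec_mapping(&fwspec);
}
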
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f9a59f6cabd2..a71175ff98d5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -258,37 +258,6 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
258} 258}
259EXPORT_SYMBOL_GPL(irq_set_affinity_hint); 259EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
260 260
261/**
262 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
263 * @irq: interrupt number to set affinity
264 * @vcpu_info: vCPU specific data
265 *
266 * This function uses the vCPU specific data to set the vCPU
267 * affinity for an irq. The vCPU specific data is passed from
268 * outside, such as KVM. One example code path is as below:
269 * KVM -> IOMMU -> irq_set_vcpu_affinity().
270 */
271int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
272{
273 unsigned long flags;
274 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
275 struct irq_data *data;
276 struct irq_chip *chip;
277 int ret = -ENOSYS;
278
279 if (!desc)
280 return -EINVAL;
281
282 data = irq_desc_get_irq_data(desc);
283 chip = irq_data_get_irq_chip(data);
284 if (chip && chip->irq_set_vcpu_affinity)
285 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
286 irq_put_desc_unlock(desc, flags);
287
288 return ret;
289}
290EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
291
292static void irq_affinity_notify(struct work_struct *work) 261static void irq_affinity_notify(struct work_struct *work)
293{ 262{
294 struct irq_affinity_notify *notify = 263 struct irq_affinity_notify *notify =
@@ -424,6 +393,37 @@ setup_affinity(struct irq_desc *desc, struct cpumask *mask)
424} 393}
425#endif 394#endif
426 395
396/**
397 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
398 * @irq: interrupt number to set affinity
399 * @vcpu_info: vCPU specific data
400 *
401 * This function uses the vCPU specific data to set the vCPU
402 * affinity for an irq. The vCPU specific data is passed from
403 * outside, such as KVM. One example code path is as below:
404 * KVM -> IOMMU -> irq_set_vcpu_affinity().
405 */
406int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
407{
408 unsigned long flags;
409 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
410 struct irq_data *data;
411 struct irq_chip *chip;
412 int ret = -ENOSYS;
413
414 if (!desc)
415 return -EINVAL;
416
417 data = irq_desc_get_irq_data(desc);
418 chip = irq_data_get_irq_chip(data);
419 if (chip && chip->irq_set_vcpu_affinity)
420 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
421 irq_put_desc_unlock(desc, flags);
422
423 return ret;
424}
425EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
426
427void __disable_irq(struct irq_desc *desc) 427void __disable_irq(struct irq_desc *desc)
428{ 428{
429 if (!desc->depth++) 429 if (!desc->depth++)
@@ -730,6 +730,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
730 return IRQ_NONE; 730 return IRQ_NONE;
731} 731}
732 732
733static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
734{
735 WARN(1, "Secondary action handler called for irq %d\n", irq);
736 return IRQ_NONE;
737}
738
733static int irq_wait_for_interrupt(struct irqaction *action) 739static int irq_wait_for_interrupt(struct irqaction *action)
734{ 740{
735 set_current_state(TASK_INTERRUPTIBLE); 741 set_current_state(TASK_INTERRUPTIBLE);
@@ -756,7 +762,8 @@ static int irq_wait_for_interrupt(struct irqaction *action)
756static void irq_finalize_oneshot(struct irq_desc *desc, 762static void irq_finalize_oneshot(struct irq_desc *desc,
757 struct irqaction *action) 763 struct irqaction *action)
758{ 764{
759 if (!(desc->istate & IRQS_ONESHOT)) 765 if (!(desc->istate & IRQS_ONESHOT) ||
766 action->handler == irq_forced_secondary_handler)
760 return; 767 return;
761again: 768again:
762 chip_bus_lock(desc); 769 chip_bus_lock(desc);
@@ -910,6 +917,18 @@ static void irq_thread_dtor(struct callback_head *unused)
910 irq_finalize_oneshot(desc, action); 917 irq_finalize_oneshot(desc, action);
911} 918}
912 919
920static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
921{
922 struct irqaction *secondary = action->secondary;
923
924 if (WARN_ON_ONCE(!secondary))
925 return;
926
927 raw_spin_lock_irq(&desc->lock);
928 __irq_wake_thread(desc, secondary);
929 raw_spin_unlock_irq(&desc->lock);
930}
931
913/* 932/*
914 * Interrupt handler thread 933 * Interrupt handler thread
915 */ 934 */
@@ -940,6 +959,8 @@ static int irq_thread(void *data)
940 action_ret = handler_fn(desc, action); 959 action_ret = handler_fn(desc, action);
941 if (action_ret == IRQ_HANDLED) 960 if (action_ret == IRQ_HANDLED)
942 atomic_inc(&desc->threads_handled); 961 atomic_inc(&desc->threads_handled);
962 if (action_ret == IRQ_WAKE_THREAD)
963 irq_wake_secondary(desc, action);
943 964
944 wake_threads_waitq(desc); 965 wake_threads_waitq(desc);
945 } 966 }
@@ -984,20 +1005,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
984} 1005}
985EXPORT_SYMBOL_GPL(irq_wake_thread); 1006EXPORT_SYMBOL_GPL(irq_wake_thread);
986 1007
987static void irq_setup_forced_threading(struct irqaction *new) 1008static int irq_setup_forced_threading(struct irqaction *new)
988{ 1009{
989 if (!force_irqthreads) 1010 if (!force_irqthreads)
990 return; 1011 return 0;
991 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) 1012 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
992 return; 1013 return 0;
993 1014
994 new->flags |= IRQF_ONESHOT; 1015 new->flags |= IRQF_ONESHOT;
995 1016
996 if (!new->thread_fn) { 1017 /*
997 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); 1018 * Handle the case where we have a real primary handler and a
998 new->thread_fn = new->handler; 1019 * thread handler. We force thread them as well by creating a
999 new->handler = irq_default_primary_handler; 1020 * secondary action.
1021 */
1022 if (new->handler != irq_default_primary_handler && new->thread_fn) {
1023 /* Allocate the secondary action */
1024 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1025 if (!new->secondary)
1026 return -ENOMEM;
1027 new->secondary->handler = irq_forced_secondary_handler;
1028 new->secondary->thread_fn = new->thread_fn;
1029 new->secondary->dev_id = new->dev_id;
1030 new->secondary->irq = new->irq;
1031 new->secondary->name = new->name;
1000 } 1032 }
1033 /* Deal with the primary handler */
1034 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1035 new->thread_fn = new->handler;
1036 new->handler = irq_default_primary_handler;
1037 return 0;
1001} 1038}
1002 1039
1003static int irq_request_resources(struct irq_desc *desc) 1040static int irq_request_resources(struct irq_desc *desc)
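
A hedged sketch of the driver pattern this hunk targets (all names below are made up for illustration): a request that supplies both a real primary handler and a thread function. Without the "threadirqs" command line option nothing changes; with forced threading, the primary handler now runs from the irq/<N>-foo thread while foo_thread_fn() runs from the new secondary action's irq/<N>-s-foo thread, woken whenever the primary returns IRQ_WAKE_THREAD.

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        /* quick hardware ack, then hand off the heavy work */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        /* long-running processing in sleepable context */
        return IRQ_HANDLED;
}

static int foo_request(unsigned int irq, void *dev)
{
        /* no IRQF_ONESHOT here: that flag opts the request out of forced threading */
        return request_threaded_irq(irq, foo_hardirq, foo_thread_fn, 0,
                                    "foo", dev);
}

Keeping the thread function in a separate secondary irqaction, rather than folding it into the forced primary thread, preserves the IRQ_WAKE_THREAD hand-off the driver already relies on.
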
@@ -1017,6 +1054,48 @@ static void irq_release_resources(struct irq_desc *desc)
1017 c->irq_release_resources(d); 1054 c->irq_release_resources(d);
1018} 1055}
1019 1056
1057static int
1058setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1059{
1060 struct task_struct *t;
1061 struct sched_param param = {
1062 .sched_priority = MAX_USER_RT_PRIO/2,
1063 };
1064
1065 if (!secondary) {
1066 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1067 new->name);
1068 } else {
1069 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1070 new->name);
1071 param.sched_priority -= 1;
1072 }
1073
1074 if (IS_ERR(t))
1075 return PTR_ERR(t);
1076
1077 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1078
1079 /*
1080 * We keep the reference to the task struct even if
1081 * the thread dies to avoid that the interrupt code
1082 * references an already freed task_struct.
1083 */
1084 get_task_struct(t);
1085 new->thread = t;
1086 /*
1087 * Tell the thread to set its affinity. This is
1088 * important for shared interrupt handlers as we do
1089 * not invoke setup_affinity() for the secondary
1090 * handlers as everything is already set up. Even for
1091 * interrupts marked with IRQF_NO_BALANCE this is
1092 * correct as we want the thread to move to the cpu(s)
1093 * on which the requesting code placed the interrupt.
1094 */
1095 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1096 return 0;
1097}
1098
1020/* 1099/*
1021 * Internal function to register an irqaction - typically used to 1100 * Internal function to register an irqaction - typically used to
1022 * allocate special interrupts that are part of the architecture. 1101 * allocate special interrupts that are part of the architecture.
@@ -1037,6 +1116,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1037 if (!try_module_get(desc->owner)) 1116 if (!try_module_get(desc->owner))
1038 return -ENODEV; 1117 return -ENODEV;
1039 1118
1119 new->irq = irq;
1120
1040 /* 1121 /*
1041 * Check whether the interrupt nests into another interrupt 1122 * Check whether the interrupt nests into another interrupt
1042 * thread. 1123 * thread.
@@ -1054,8 +1135,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1054 */ 1135 */
1055 new->handler = irq_nested_primary_handler; 1136 new->handler = irq_nested_primary_handler;
1056 } else { 1137 } else {
1057 if (irq_settings_can_thread(desc)) 1138 if (irq_settings_can_thread(desc)) {
1058 irq_setup_forced_threading(new); 1139 ret = irq_setup_forced_threading(new);
1140 if (ret)
1141 goto out_mput;
1142 }
1059 } 1143 }
1060 1144
1061 /* 1145 /*
@@ -1064,37 +1148,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1064 * thread. 1148 * thread.
1065 */ 1149 */
1066 if (new->thread_fn && !nested) { 1150 if (new->thread_fn && !nested) {
1067 struct task_struct *t; 1151 ret = setup_irq_thread(new, irq, false);
1068 static const struct sched_param param = { 1152 if (ret)
1069 .sched_priority = MAX_USER_RT_PRIO/2,
1070 };
1071
1072 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1073 new->name);
1074 if (IS_ERR(t)) {
1075 ret = PTR_ERR(t);
1076 goto out_mput; 1153 goto out_mput;
1154 if (new->secondary) {
1155 ret = setup_irq_thread(new->secondary, irq, true);
1156 if (ret)
1157 goto out_thread;
1077 } 1158 }
1078
1079 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1080
1081 /*
1082 * We keep the reference to the task struct even if
1083 * the thread dies to avoid that the interrupt code
1084 * references an already freed task_struct.
1085 */
1086 get_task_struct(t);
1087 new->thread = t;
1088 /*
1089 * Tell the thread to set its affinity. This is
1090 * important for shared interrupt handlers as we do
1091 * not invoke setup_affinity() for the secondary
1092 * handlers as everything is already set up. Even for
1093 * interrupts marked with IRQF_NO_BALANCE this is
1094 * correct as we want the thread to move to the cpu(s)
1095 * on which the requesting code placed the interrupt.
1096 */
1097 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1098 } 1159 }
1099 1160
1100 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 1161 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1267,7 +1328,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1267 irq, nmsk, omsk); 1328 irq, nmsk, omsk);
1268 } 1329 }
1269 1330
1270 new->irq = irq;
1271 *old_ptr = new; 1331 *old_ptr = new;
1272 1332
1273 irq_pm_install_action(desc, new); 1333 irq_pm_install_action(desc, new);
@@ -1293,6 +1353,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1293 */ 1353 */
1294 if (new->thread) 1354 if (new->thread)
1295 wake_up_process(new->thread); 1355 wake_up_process(new->thread);
1356 if (new->secondary)
1357 wake_up_process(new->secondary->thread);
1296 1358
1297 register_irq_proc(irq, desc); 1359 register_irq_proc(irq, desc);
1298 new->dir = NULL; 1360 new->dir = NULL;
@@ -1323,6 +1385,13 @@ out_thread:
1323 kthread_stop(t); 1385 kthread_stop(t);
1324 put_task_struct(t); 1386 put_task_struct(t);
1325 } 1387 }
1388 if (new->secondary && new->secondary->thread) {
1389 struct task_struct *t = new->secondary->thread;
1390
1391 new->secondary->thread = NULL;
1392 kthread_stop(t);
1393 put_task_struct(t);
1394 }
1326out_mput: 1395out_mput:
1327 module_put(desc->owner); 1396 module_put(desc->owner);
1328 return ret; 1397 return ret;
@@ -1394,6 +1463,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1394 1463
1395 /* If this was the last handler, shut down the IRQ line: */ 1464 /* If this was the last handler, shut down the IRQ line: */
1396 if (!desc->action) { 1465 if (!desc->action) {
1466 irq_settings_clr_disable_unlazy(desc);
1397 irq_shutdown(desc); 1467 irq_shutdown(desc);
1398 irq_release_resources(desc); 1468 irq_release_resources(desc);
1399 } 1469 }
@@ -1430,9 +1500,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1430 if (action->thread) { 1500 if (action->thread) {
1431 kthread_stop(action->thread); 1501 kthread_stop(action->thread);
1432 put_task_struct(action->thread); 1502 put_task_struct(action->thread);
1503 if (action->secondary && action->secondary->thread) {
1504 kthread_stop(action->secondary->thread);
1505 put_task_struct(action->secondary->thread);
1506 }
1433 } 1507 }
1434 1508
1435 module_put(desc->owner); 1509 module_put(desc->owner);
1510 kfree(action->secondary);
1436 return action; 1511 return action;
1437} 1512}
1438 1513
@@ -1576,8 +1651,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1576 retval = __setup_irq(irq, desc, action); 1651 retval = __setup_irq(irq, desc, action);
1577 chip_bus_sync_unlock(desc); 1652 chip_bus_sync_unlock(desc);
1578 1653
1579 if (retval) 1654 if (retval) {
1655 kfree(action->secondary);
1580 kfree(action); 1656 kfree(action);
1657 }
1581 1658
1582#ifdef CONFIG_DEBUG_SHIRQ_FIXME 1659#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1583 if (!retval && (irqflags & IRQF_SHARED)) { 1660 if (!retval && (irqflags & IRQF_SHARED)) {
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index be9149f62eb8..6b0c0b74a2a1 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -235,11 +235,11 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
235 235
236/** 236/**
237 * msi_create_irq_domain - Create a MSI interrupt domain 237 * msi_create_irq_domain - Create a MSI interrupt domain
238 * @of_node: Optional device-tree node of the interrupt controller 238 * @fwnode: Optional fwnode of the interrupt controller
239 * @info: MSI domain info 239 * @info: MSI domain info
240 * @parent: Parent irq domain 240 * @parent: Parent irq domain
241 */ 241 */
242struct irq_domain *msi_create_irq_domain(struct device_node *node, 242struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
243 struct msi_domain_info *info, 243 struct msi_domain_info *info,
244 struct irq_domain *parent) 244 struct irq_domain *parent)
245{ 245{
@@ -248,8 +248,8 @@ struct irq_domain *msi_create_irq_domain(struct device_node *node,
248 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 248 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
249 msi_domain_update_chip_ops(info); 249 msi_domain_update_chip_ops(info);
250 250
251 return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops, 251 return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
252 info); 252 &msi_domain_ops, info);
253} 253}
254 254
255/** 255/**
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a50ddc9417ff..a916cf144b65 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -475,7 +475,7 @@ int show_interrupts(struct seq_file *p, void *v)
475 for_each_online_cpu(j) 475 for_each_online_cpu(j)
476 any_count |= kstat_irqs_cpu(i, j); 476 any_count |= kstat_irqs_cpu(i, j);
477 action = desc->action; 477 action = desc->action;
478 if (!action && !any_count) 478 if ((!action || action == &chained_action) && !any_count)
479 goto out; 479 goto out;
480 480
481 seq_printf(p, "%*d: ", prec, i); 481 seq_printf(p, "%*d: ", prec, i);
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 3320b84cc60f..320579d89091 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -15,6 +15,7 @@ enum {
15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, 15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, 16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
17 _IRQ_IS_POLLED = IRQ_IS_POLLED, 17 _IRQ_IS_POLLED = IRQ_IS_POLLED,
18 _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
18 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, 19 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
19}; 20};
20 21
@@ -28,6 +29,7 @@ enum {
28#define IRQ_NESTED_THREAD GOT_YOU_MORON 29#define IRQ_NESTED_THREAD GOT_YOU_MORON
29#define IRQ_PER_CPU_DEVID GOT_YOU_MORON 30#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
30#define IRQ_IS_POLLED GOT_YOU_MORON 31#define IRQ_IS_POLLED GOT_YOU_MORON
32#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
31#undef IRQF_MODIFY_MASK 33#undef IRQF_MODIFY_MASK
32#define IRQF_MODIFY_MASK GOT_YOU_MORON 34#define IRQF_MODIFY_MASK GOT_YOU_MORON
33 35
@@ -154,3 +156,13 @@ static inline bool irq_settings_is_polled(struct irq_desc *desc)
154{ 156{
155 return desc->status_use_accessors & _IRQ_IS_POLLED; 157 return desc->status_use_accessors & _IRQ_IS_POLLED;
156} 158}
159
160static inline bool irq_settings_disable_unlazy(struct irq_desc *desc)
161{
162 return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY;
163}
164
165static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
166{
167 desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
168}
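
The new _IRQ_DISABLE_UNLAZY bit backs the IRQ_DISABLE_UNLAZY status flag that __free_irq() now clears once the last action is gone (see the manage.c hunk above). A hypothetical consumer, assuming the existing irq_set_status_flags() helper, would look like:

static void foo_setup_irq(unsigned int dev_irq)
{
        /* ask the core to mask at the irq chip on disable_irq(),
         * instead of the default lazy disable */
        irq_set_status_flags(dev_irq, IRQ_DISABLE_UNLAZY);
}

This is meant for interrupt chips and devices where the default lazy disable, which only masks the line when the next interrupt actually arrives, is problematic.
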
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 201b45327804..bd9f8a03cefa 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1149,7 +1149,7 @@ static int __init parse_crashkernel_simple(char *cmdline,
1149 if (*cur == '@') 1149 if (*cur == '@')
1150 *crash_base = memparse(cur+1, &cur); 1150 *crash_base = memparse(cur+1, &cur);
1151 else if (*cur != ' ' && *cur != '\0') { 1151 else if (*cur != ' ' && *cur != '\0') {
1152 pr_warn("crashkernel: unrecognized char\n"); 1152 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1153 return -EINVAL; 1153 return -EINVAL;
1154 } 1154 }
1155 1155
@@ -1186,12 +1186,12 @@ static int __init parse_crashkernel_suffix(char *cmdline,
1186 1186
1187 /* check with suffix */ 1187 /* check with suffix */
1188 if (strncmp(cur, suffix, strlen(suffix))) { 1188 if (strncmp(cur, suffix, strlen(suffix))) {
1189 pr_warn("crashkernel: unrecognized char\n"); 1189 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1190 return -EINVAL; 1190 return -EINVAL;
1191 } 1191 }
1192 cur += strlen(suffix); 1192 cur += strlen(suffix);
1193 if (*cur != ' ' && *cur != '\0') { 1193 if (*cur != ' ' && *cur != '\0') {
1194 pr_warn("crashkernel: unrecognized char\n"); 1194 pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1195 return -EINVAL; 1195 return -EINVAL;
1196 } 1196 }
1197 1197
diff --git a/kernel/kmod.c b/kernel/kmod.c
index da98d0593de2..0277d1216f80 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -327,9 +327,13 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
327 call_usermodehelper_exec_sync(sub_info); 327 call_usermodehelper_exec_sync(sub_info);
328 } else { 328 } else {
329 pid_t pid; 329 pid_t pid;
330 330 /*
331 * Use CLONE_PARENT to reparent it to kthreadd; we do not
332 * want to pollute current->children, and we need a parent
333 * that always ignores SIGCHLD to ensure auto-reaping.
334 */
331 pid = kernel_thread(call_usermodehelper_exec_async, sub_info, 335 pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
332 SIGCHLD); 336 CLONE_PARENT | SIGCHLD);
333 if (pid < 0) { 337 if (pid < 0) {
334 sub_info->retval = pid; 338 sub_info->retval = pid;
335 umh_complete(sub_info); 339 umh_complete(sub_info);
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 32244186f1f2..8ef1919d63b2 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -17,12 +17,14 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2014 18 * Copyright (C) IBM Corporation, 2014
19 * 19 *
20 * Author: Paul E. McKenney <paulmck@us.ibm.com> 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * Davidlohr Bueso <dave@stgolabs.net>
21 * Based on kernel/rcu/torture.c. 22 * Based on kernel/rcu/torture.c.
22 */ 23 */
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/kthread.h> 26#include <linux/kthread.h>
27#include <linux/sched/rt.h>
26#include <linux/spinlock.h> 28#include <linux/spinlock.h>
27#include <linux/rwlock.h> 29#include <linux/rwlock.h>
28#include <linux/mutex.h> 30#include <linux/mutex.h>
@@ -34,6 +36,7 @@
34#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
35#include <linux/delay.h> 37#include <linux/delay.h>
36#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/percpu-rwsem.h>
37#include <linux/torture.h> 40#include <linux/torture.h>
38 41
39MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
@@ -91,11 +94,13 @@ struct lock_torture_ops {
91 void (*init)(void); 94 void (*init)(void);
92 int (*writelock)(void); 95 int (*writelock)(void);
93 void (*write_delay)(struct torture_random_state *trsp); 96 void (*write_delay)(struct torture_random_state *trsp);
97 void (*task_boost)(struct torture_random_state *trsp);
94 void (*writeunlock)(void); 98 void (*writeunlock)(void);
95 int (*readlock)(void); 99 int (*readlock)(void);
96 void (*read_delay)(struct torture_random_state *trsp); 100 void (*read_delay)(struct torture_random_state *trsp);
97 void (*readunlock)(void); 101 void (*readunlock)(void);
98 unsigned long flags; 102
103 unsigned long flags; /* for irq spinlocks */
99 const char *name; 104 const char *name;
100}; 105};
101 106
@@ -139,9 +144,15 @@ static void torture_lock_busted_write_unlock(void)
139 /* BUGGY, do not use in real life!!! */ 144 /* BUGGY, do not use in real life!!! */
140} 145}
141 146
147static void torture_boost_dummy(struct torture_random_state *trsp)
148{
149 /* Only rtmutexes care about priority */
150}
151
142static struct lock_torture_ops lock_busted_ops = { 152static struct lock_torture_ops lock_busted_ops = {
143 .writelock = torture_lock_busted_write_lock, 153 .writelock = torture_lock_busted_write_lock,
144 .write_delay = torture_lock_busted_write_delay, 154 .write_delay = torture_lock_busted_write_delay,
155 .task_boost = torture_boost_dummy,
145 .writeunlock = torture_lock_busted_write_unlock, 156 .writeunlock = torture_lock_busted_write_unlock,
146 .readlock = NULL, 157 .readlock = NULL,
147 .read_delay = NULL, 158 .read_delay = NULL,
@@ -185,6 +196,7 @@ static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
185static struct lock_torture_ops spin_lock_ops = { 196static struct lock_torture_ops spin_lock_ops = {
186 .writelock = torture_spin_lock_write_lock, 197 .writelock = torture_spin_lock_write_lock,
187 .write_delay = torture_spin_lock_write_delay, 198 .write_delay = torture_spin_lock_write_delay,
199 .task_boost = torture_boost_dummy,
188 .writeunlock = torture_spin_lock_write_unlock, 200 .writeunlock = torture_spin_lock_write_unlock,
189 .readlock = NULL, 201 .readlock = NULL,
190 .read_delay = NULL, 202 .read_delay = NULL,
@@ -211,6 +223,7 @@ __releases(torture_spinlock)
211static struct lock_torture_ops spin_lock_irq_ops = { 223static struct lock_torture_ops spin_lock_irq_ops = {
212 .writelock = torture_spin_lock_write_lock_irq, 224 .writelock = torture_spin_lock_write_lock_irq,
213 .write_delay = torture_spin_lock_write_delay, 225 .write_delay = torture_spin_lock_write_delay,
226 .task_boost = torture_boost_dummy,
214 .writeunlock = torture_lock_spin_write_unlock_irq, 227 .writeunlock = torture_lock_spin_write_unlock_irq,
215 .readlock = NULL, 228 .readlock = NULL,
216 .read_delay = NULL, 229 .read_delay = NULL,
@@ -275,6 +288,7 @@ static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
275static struct lock_torture_ops rw_lock_ops = { 288static struct lock_torture_ops rw_lock_ops = {
276 .writelock = torture_rwlock_write_lock, 289 .writelock = torture_rwlock_write_lock,
277 .write_delay = torture_rwlock_write_delay, 290 .write_delay = torture_rwlock_write_delay,
291 .task_boost = torture_boost_dummy,
278 .writeunlock = torture_rwlock_write_unlock, 292 .writeunlock = torture_rwlock_write_unlock,
279 .readlock = torture_rwlock_read_lock, 293 .readlock = torture_rwlock_read_lock,
280 .read_delay = torture_rwlock_read_delay, 294 .read_delay = torture_rwlock_read_delay,
@@ -315,6 +329,7 @@ __releases(torture_rwlock)
315static struct lock_torture_ops rw_lock_irq_ops = { 329static struct lock_torture_ops rw_lock_irq_ops = {
316 .writelock = torture_rwlock_write_lock_irq, 330 .writelock = torture_rwlock_write_lock_irq,
317 .write_delay = torture_rwlock_write_delay, 331 .write_delay = torture_rwlock_write_delay,
332 .task_boost = torture_boost_dummy,
318 .writeunlock = torture_rwlock_write_unlock_irq, 333 .writeunlock = torture_rwlock_write_unlock_irq,
319 .readlock = torture_rwlock_read_lock_irq, 334 .readlock = torture_rwlock_read_lock_irq,
320 .read_delay = torture_rwlock_read_delay, 335 .read_delay = torture_rwlock_read_delay,
@@ -354,6 +369,7 @@ static void torture_mutex_unlock(void) __releases(torture_mutex)
354static struct lock_torture_ops mutex_lock_ops = { 369static struct lock_torture_ops mutex_lock_ops = {
355 .writelock = torture_mutex_lock, 370 .writelock = torture_mutex_lock,
356 .write_delay = torture_mutex_delay, 371 .write_delay = torture_mutex_delay,
372 .task_boost = torture_boost_dummy,
357 .writeunlock = torture_mutex_unlock, 373 .writeunlock = torture_mutex_unlock,
358 .readlock = NULL, 374 .readlock = NULL,
359 .read_delay = NULL, 375 .read_delay = NULL,
@@ -361,6 +377,90 @@ static struct lock_torture_ops mutex_lock_ops = {
361 .name = "mutex_lock" 377 .name = "mutex_lock"
362}; 378};
363 379
380#ifdef CONFIG_RT_MUTEXES
381static DEFINE_RT_MUTEX(torture_rtmutex);
382
383static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
384{
385 rt_mutex_lock(&torture_rtmutex);
386 return 0;
387}
388
389static void torture_rtmutex_boost(struct torture_random_state *trsp)
390{
391 int policy;
392 struct sched_param param;
393 const unsigned int factor = 50000; /* yes, quite arbitrary */
394
395 if (!rt_task(current)) {
396 /*
397 * (1) Boost priority once every ~50k operations. When the
398 * task tries to take the lock, the rtmutex will account
399 * for the new priority, and do any corresponding pi-dance.
400 */
401 if (!(torture_random(trsp) %
402 (cxt.nrealwriters_stress * factor))) {
403 policy = SCHED_FIFO;
404 param.sched_priority = MAX_RT_PRIO - 1;
405 } else /* common case, do nothing */
406 return;
407 } else {
408 /*
409 * The task will remain boosted for another ~500k operations,
410 * then restored back to its original prio, and so forth.
411 *
412 * When @trsp is nil, we want to force-reset the task for
413 * stopping the kthread.
414 */
415 if (!trsp || !(torture_random(trsp) %
416 (cxt.nrealwriters_stress * factor * 2))) {
417 policy = SCHED_NORMAL;
418 param.sched_priority = 0;
419 } else /* common case, do nothing */
420 return;
421 }
422
423 sched_setscheduler_nocheck(current, policy, &param);
424}
425
426static void torture_rtmutex_delay(struct torture_random_state *trsp)
427{
428 const unsigned long shortdelay_us = 2;
429 const unsigned long longdelay_ms = 100;
430
431 /*
432 * We want a short delay mostly to emulate likely code, and
433 * we want a long delay occasionally to force massive contention.
434 */
435 if (!(torture_random(trsp) %
436 (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
437 mdelay(longdelay_ms);
438 if (!(torture_random(trsp) %
439 (cxt.nrealwriters_stress * 2 * shortdelay_us)))
440 udelay(shortdelay_us);
441#ifdef CONFIG_PREEMPT
442 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
443 preempt_schedule(); /* Allow test to be preempted. */
444#endif
445}
446
447static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
448{
449 rt_mutex_unlock(&torture_rtmutex);
450}
451
452static struct lock_torture_ops rtmutex_lock_ops = {
453 .writelock = torture_rtmutex_lock,
454 .write_delay = torture_rtmutex_delay,
455 .task_boost = torture_rtmutex_boost,
456 .writeunlock = torture_rtmutex_unlock,
457 .readlock = NULL,
458 .read_delay = NULL,
459 .readunlock = NULL,
460 .name = "rtmutex_lock"
461};
462#endif
463
364static DECLARE_RWSEM(torture_rwsem); 464static DECLARE_RWSEM(torture_rwsem);
365static int torture_rwsem_down_write(void) __acquires(torture_rwsem) 465static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
366{ 466{
@@ -419,6 +519,7 @@ static void torture_rwsem_up_read(void) __releases(torture_rwsem)
419static struct lock_torture_ops rwsem_lock_ops = { 519static struct lock_torture_ops rwsem_lock_ops = {
420 .writelock = torture_rwsem_down_write, 520 .writelock = torture_rwsem_down_write,
421 .write_delay = torture_rwsem_write_delay, 521 .write_delay = torture_rwsem_write_delay,
522 .task_boost = torture_boost_dummy,
422 .writeunlock = torture_rwsem_up_write, 523 .writeunlock = torture_rwsem_up_write,
423 .readlock = torture_rwsem_down_read, 524 .readlock = torture_rwsem_down_read,
424 .read_delay = torture_rwsem_read_delay, 525 .read_delay = torture_rwsem_read_delay,
@@ -426,6 +527,48 @@ static struct lock_torture_ops rwsem_lock_ops = {
426 .name = "rwsem_lock" 527 .name = "rwsem_lock"
427}; 528};
428 529
530#include <linux/percpu-rwsem.h>
531static struct percpu_rw_semaphore pcpu_rwsem;
532
533void torture_percpu_rwsem_init(void)
534{
535 BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
536}
537
538static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
539{
540 percpu_down_write(&pcpu_rwsem);
541 return 0;
542}
543
544static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
545{
546 percpu_up_write(&pcpu_rwsem);
547}
548
549static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
550{
551 percpu_down_read(&pcpu_rwsem);
552 return 0;
553}
554
555static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
556{
557 percpu_up_read(&pcpu_rwsem);
558}
559
560static struct lock_torture_ops percpu_rwsem_lock_ops = {
561 .init = torture_percpu_rwsem_init,
562 .writelock = torture_percpu_rwsem_down_write,
563 .write_delay = torture_rwsem_write_delay,
564 .task_boost = torture_boost_dummy,
565 .writeunlock = torture_percpu_rwsem_up_write,
566 .readlock = torture_percpu_rwsem_down_read,
567 .read_delay = torture_rwsem_read_delay,
568 .readunlock = torture_percpu_rwsem_up_read,
569 .name = "percpu_rwsem_lock"
570};
571
429/* 572/*
430 * Lock torture writer kthread. Repeatedly acquires and releases 573 * Lock torture writer kthread. Repeatedly acquires and releases
431 * the lock, checking for duplicate acquisitions. 574 * the lock, checking for duplicate acquisitions.
@@ -442,6 +585,7 @@ static int lock_torture_writer(void *arg)
442 if ((torture_random(&rand) & 0xfffff) == 0) 585 if ((torture_random(&rand) & 0xfffff) == 0)
443 schedule_timeout_uninterruptible(1); 586 schedule_timeout_uninterruptible(1);
444 587
588 cxt.cur_ops->task_boost(&rand);
445 cxt.cur_ops->writelock(); 589 cxt.cur_ops->writelock();
446 if (WARN_ON_ONCE(lock_is_write_held)) 590 if (WARN_ON_ONCE(lock_is_write_held))
447 lwsp->n_lock_fail++; 591 lwsp->n_lock_fail++;
@@ -456,6 +600,8 @@ static int lock_torture_writer(void *arg)
456 600
457 stutter_wait("lock_torture_writer"); 601 stutter_wait("lock_torture_writer");
458 } while (!torture_must_stop()); 602 } while (!torture_must_stop());
603
604 cxt.cur_ops->task_boost(NULL); /* reset prio */
459 torture_kthread_stopping("lock_torture_writer"); 605 torture_kthread_stopping("lock_torture_writer");
460 return 0; 606 return 0;
461} 607}
@@ -642,7 +788,11 @@ static int __init lock_torture_init(void)
642 &spin_lock_ops, &spin_lock_irq_ops, 788 &spin_lock_ops, &spin_lock_irq_ops,
643 &rw_lock_ops, &rw_lock_irq_ops, 789 &rw_lock_ops, &rw_lock_irq_ops,
644 &mutex_lock_ops, 790 &mutex_lock_ops,
791#ifdef CONFIG_RT_MUTEXES
792 &rtmutex_lock_ops,
793#endif
645 &rwsem_lock_ops, 794 &rwsem_lock_ops,
795 &percpu_rwsem_lock_ops,
646 }; 796 };
647 797
648 if (!torture_init_begin(torture_type, verbose, &torture_runnable)) 798 if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -661,11 +811,11 @@ static int __init lock_torture_init(void)
661 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 811 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
662 pr_alert(" %s", torture_ops[i]->name); 812 pr_alert(" %s", torture_ops[i]->name);
663 pr_alert("\n"); 813 pr_alert("\n");
664 torture_init_end(); 814 firsterr = -EINVAL;
665 return -EINVAL; 815 goto unwind;
666 } 816 }
667 if (cxt.cur_ops->init) 817 if (cxt.cur_ops->init)
668 cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */ 818 cxt.cur_ops->init();
669 819
670 if (nwriters_stress >= 0) 820 if (nwriters_stress >= 0)
671 cxt.nrealwriters_stress = nwriters_stress; 821 cxt.nrealwriters_stress = nwriters_stress;
@@ -676,6 +826,10 @@ static int __init lock_torture_init(void)
676 if (strncmp(torture_type, "mutex", 5) == 0) 826 if (strncmp(torture_type, "mutex", 5) == 0)
677 cxt.debug_lock = true; 827 cxt.debug_lock = true;
678#endif 828#endif
829#ifdef CONFIG_DEBUG_RT_MUTEXES
830 if (strncmp(torture_type, "rtmutex", 7) == 0)
831 cxt.debug_lock = true;
832#endif
679#ifdef CONFIG_DEBUG_SPINLOCK 833#ifdef CONFIG_DEBUG_SPINLOCK
680 if ((strncmp(torture_type, "spin", 4) == 0) || 834 if ((strncmp(torture_type, "spin", 4) == 0) ||
681 (strncmp(torture_type, "rw_lock", 7) == 0)) 835 (strncmp(torture_type, "rw_lock", 7) == 0))
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index fd91aaa4554c..5b9102a47ea5 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -67,7 +67,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
67 node->locked = 0; 67 node->locked = 0;
68 node->next = NULL; 68 node->next = NULL;
69 69
70 prev = xchg(lock, node); 70 prev = xchg_acquire(lock, node);
71 if (likely(prev == NULL)) { 71 if (likely(prev == NULL)) {
72 /* 72 /*
73 * Lock acquired, don't need to set node->locked to 1. Threads 73 * Lock acquired, don't need to set node->locked to 1. Threads
@@ -98,7 +98,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
98 /* 98 /*
99 * Release the lock by setting it to NULL 99 * Release the lock by setting it to NULL
100 */ 100 */
101 if (likely(cmpxchg(lock, node, NULL) == node)) 101 if (likely(cmpxchg_release(lock, node, NULL) == node))
102 return; 102 return;
103 /* Wait until the next pointer is set */ 103 /* Wait until the next pointer is set */
104 while (!(next = READ_ONCE(node->next))) 104 while (!(next = READ_ONCE(node->next)))
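
The xchg_acquire()/cmpxchg_release() substitutions keep the MCS lock correct while dropping the implicit full barriers: the lock side only needs ACQUIRE ordering, the unlock side only RELEASE. As a userspace analogue (illustration only, C11 atomics rather than the kernel primitives), the same pattern on a trivial test-and-set lock reads:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool locked;

static void toy_lock(void)
{
        /* ACQUIRE: the critical section cannot be reordered above this exchange */
        while (atomic_exchange_explicit(&locked, true, memory_order_acquire))
                ;                       /* spin */
}

static void toy_unlock(void)
{
        /* RELEASE: the critical section cannot be reordered below this store */
        atomic_store_explicit(&locked, false, memory_order_release);
}
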
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4cccea6b8934..0551c219c40e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -277,7 +277,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
277static inline bool mutex_try_to_acquire(struct mutex *lock) 277static inline bool mutex_try_to_acquire(struct mutex *lock)
278{ 278{
279 return !mutex_is_locked(lock) && 279 return !mutex_is_locked(lock) &&
280 (atomic_cmpxchg(&lock->count, 1, 0) == 1); 280 (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
281} 281}
282 282
283/* 283/*
@@ -529,7 +529,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
529 * Once more, try to acquire the lock. Only try-lock the mutex if 529 * Once more, try to acquire the lock. Only try-lock the mutex if
530 * it is unlocked to reduce unnecessary xchg() operations. 530 * it is unlocked to reduce unnecessary xchg() operations.
531 */ 531 */
532 if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1)) 532 if (!mutex_is_locked(lock) &&
533 (atomic_xchg_acquire(&lock->count, 0) == 1))
533 goto skip_wait; 534 goto skip_wait;
534 535
535 debug_mutex_lock_common(lock, &waiter); 536 debug_mutex_lock_common(lock, &waiter);
@@ -553,7 +554,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
553 * non-negative in order to avoid unnecessary xchg operations: 554 * non-negative in order to avoid unnecessary xchg operations:
554 */ 555 */
555 if (atomic_read(&lock->count) >= 0 && 556 if (atomic_read(&lock->count) >= 0 &&
556 (atomic_xchg(&lock->count, -1) == 1)) 557 (atomic_xchg_acquire(&lock->count, -1) == 1))
557 break; 558 break;
558 559
559 /* 560 /*
@@ -867,7 +868,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
867 868
868 spin_lock_mutex(&lock->wait_lock, flags); 869 spin_lock_mutex(&lock->wait_lock, flags);
869 870
870 prev = atomic_xchg(&lock->count, -1); 871 prev = atomic_xchg_acquire(&lock->count, -1);
871 if (likely(prev == 1)) { 872 if (likely(prev == 1)) {
872 mutex_set_owner(lock); 873 mutex_set_owner(lock);
873 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); 874 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index dc85ee23a26f..d092a0c9c2d4 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -50,7 +50,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
50 50
51 for (;;) { 51 for (;;) {
52 if (atomic_read(&lock->tail) == curr && 52 if (atomic_read(&lock->tail) == curr &&
53 atomic_cmpxchg(&lock->tail, curr, old) == curr) { 53 atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
54 /* 54 /*
55 * We were the last queued, we moved @lock back. @prev 55 * We were the last queued, we moved @lock back. @prev
56 * will now observe @lock and will complete its 56 * will now observe @lock and will complete its
@@ -92,7 +92,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
92 node->next = NULL; 92 node->next = NULL;
93 node->cpu = curr; 93 node->cpu = curr;
94 94
95 old = atomic_xchg(&lock->tail, curr); 95 /*
96 * ACQUIRE semantics, pairs with corresponding RELEASE
97 * in unlock() uncontended, or fastpath.
98 */
99 old = atomic_xchg_acquire(&lock->tail, curr);
96 if (old == OSQ_UNLOCKED_VAL) 100 if (old == OSQ_UNLOCKED_VAL)
97 return true; 101 return true;
98 102
@@ -184,7 +188,8 @@ void osq_unlock(struct optimistic_spin_queue *lock)
184 /* 188 /*
185 * Fast path for the uncontended case. 189 * Fast path for the uncontended case.
186 */ 190 */
187 if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr)) 191 if (likely(atomic_cmpxchg_release(&lock->tail, curr,
192 OSQ_UNLOCKED_VAL) == curr))
188 return; 193 return;
189 194
190 /* 195 /*
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index f32567254867..f231e0bb311c 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -17,50 +17,43 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
17 17
18 /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */ 18 /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
19 __init_rwsem(&brw->rw_sem, name, rwsem_key); 19 __init_rwsem(&brw->rw_sem, name, rwsem_key);
20 atomic_set(&brw->write_ctr, 0); 20 rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
21 atomic_set(&brw->slow_read_ctr, 0); 21 atomic_set(&brw->slow_read_ctr, 0);
22 init_waitqueue_head(&brw->write_waitq); 22 init_waitqueue_head(&brw->write_waitq);
23 return 0; 23 return 0;
24} 24}
25EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
25 26
26void percpu_free_rwsem(struct percpu_rw_semaphore *brw) 27void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
27{ 28{
29 /*
30 * XXX: temporary kludge. The error path in alloc_super()
31 * assumes that percpu_free_rwsem() is safe after kzalloc().
32 */
33 if (!brw->fast_read_ctr)
34 return;
35
36 rcu_sync_dtor(&brw->rss);
28 free_percpu(brw->fast_read_ctr); 37 free_percpu(brw->fast_read_ctr);
29 brw->fast_read_ctr = NULL; /* catch use after free bugs */ 38 brw->fast_read_ctr = NULL; /* catch use after free bugs */
30} 39}
31 40
32/* 41/*
33 * This is the fast-path for down_read/up_read, it only needs to ensure 42 * This is the fast-path for down_read/up_read. If it succeeds we rely
34 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the 43 * on the barriers provided by rcu_sync_enter/exit; see the comments in
35 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to 44 * percpu_down_write() and percpu_up_write().
36 * serialize with the preempt-disabled section below.
37 *
38 * The nontrivial part is that we should guarantee acquire/release semantics
39 * in case when
40 *
41 * R_W: down_write() comes after up_read(), the writer should see all
42 * changes done by the reader
43 * or
44 * W_R: down_read() comes after up_write(), the reader should see all
45 * changes done by the writer
46 * 45 *
47 * If this helper fails the callers rely on the normal rw_semaphore and 46 * If this helper fails the callers rely on the normal rw_semaphore and
48 * atomic_dec_and_test(), so in this case we have the necessary barriers. 47 * atomic_dec_and_test(), so in this case we have the necessary barriers.
49 *
50 * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
51 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
52 * reader inside the critical section. See the comments in down_write and
53 * up_write below.
54 */ 48 */
55static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val) 49static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
56{ 50{
57 bool success = false; 51 bool success;
58 52
59 preempt_disable(); 53 preempt_disable();
60 if (likely(!atomic_read(&brw->write_ctr))) { 54 success = rcu_sync_is_idle(&brw->rss);
55 if (likely(success))
61 __this_cpu_add(*brw->fast_read_ctr, val); 56 __this_cpu_add(*brw->fast_read_ctr, val);
62 success = true;
63 }
64 preempt_enable(); 57 preempt_enable();
65 58
66 return success; 59 return success;
@@ -77,16 +70,17 @@ static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
77void percpu_down_read(struct percpu_rw_semaphore *brw) 70void percpu_down_read(struct percpu_rw_semaphore *brw)
78{ 71{
79 might_sleep(); 72 might_sleep();
80 if (likely(update_fast_ctr(brw, +1))) { 73 rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
81 rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_); 74
75 if (likely(update_fast_ctr(brw, +1)))
82 return; 76 return;
83 }
84 77
85 down_read(&brw->rw_sem); 78 /* Avoid rwsem_acquire_read() and rwsem_release() */
79 __down_read(&brw->rw_sem);
86 atomic_inc(&brw->slow_read_ctr); 80 atomic_inc(&brw->slow_read_ctr);
87 /* avoid up_read()->rwsem_release() */
88 __up_read(&brw->rw_sem); 81 __up_read(&brw->rw_sem);
89} 82}
83EXPORT_SYMBOL_GPL(percpu_down_read);
90 84
91int percpu_down_read_trylock(struct percpu_rw_semaphore *brw) 85int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
92{ 86{
@@ -112,6 +106,7 @@ void percpu_up_read(struct percpu_rw_semaphore *brw)
112 if (atomic_dec_and_test(&brw->slow_read_ctr)) 106 if (atomic_dec_and_test(&brw->slow_read_ctr))
113 wake_up_all(&brw->write_waitq); 107 wake_up_all(&brw->write_waitq);
114} 108}
109EXPORT_SYMBOL_GPL(percpu_up_read);
115 110
116static int clear_fast_ctr(struct percpu_rw_semaphore *brw) 111static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
117{ 112{
@@ -126,33 +121,17 @@ static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
126 return sum; 121 return sum;
127} 122}
128 123
129/*
130 * A writer increments ->write_ctr to force the readers to switch to the
131 * slow mode, note the atomic_read() check in update_fast_ctr().
132 *
133 * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
134 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
135 * counter it represents the number of active readers.
136 *
137 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
138 * then waits until the slow counter becomes zero.
139 */
140void percpu_down_write(struct percpu_rw_semaphore *brw) 124void percpu_down_write(struct percpu_rw_semaphore *brw)
141{ 125{
142 /* tell update_fast_ctr() there is a pending writer */
143 atomic_inc(&brw->write_ctr);
144 /* 126 /*
145 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read 127 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
146 * so that update_fast_ctr() can't succeed. 128 * percpu_down_read() and percpu_up_read(), and wait for gp pass.
147 *
148 * 2. Ensures we see the result of every previous this_cpu_add() in
149 * update_fast_ctr().
150 * 129 *
151 * 3. Ensures that if any reader has exited its critical section via 130 * The latter synchronises us with the preceding readers which used
152 * fast-path, it executes a full memory barrier before we return. 131 * the fast-path, so we cannot miss the result of __this_cpu_add()
153 * See R_W case in the comment above update_fast_ctr(). 132 * or anything else inside their critical sections.
154 */ 133 */
155 synchronize_sched_expedited(); 134 rcu_sync_enter(&brw->rss);
156 135
157 /* exclude other writers, and block the new readers completely */ 136 /* exclude other writers, and block the new readers completely */
158 down_write(&brw->rw_sem); 137 down_write(&brw->rw_sem);
@@ -163,16 +142,17 @@ void percpu_down_write(struct percpu_rw_semaphore *brw)
163 /* wait for all readers to complete their percpu_up_read() */ 142 /* wait for all readers to complete their percpu_up_read() */
164 wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr)); 143 wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
165} 144}
145EXPORT_SYMBOL_GPL(percpu_down_write);
166 146
167void percpu_up_write(struct percpu_rw_semaphore *brw) 147void percpu_up_write(struct percpu_rw_semaphore *brw)
168{ 148{
169 /* release the lock, but the readers can't use the fast-path */ 149 /* release the lock, but the readers can't use the fast-path */
170 up_write(&brw->rw_sem); 150 up_write(&brw->rw_sem);
171 /* 151 /*
172 * Insert the barrier before the next fast-path in down_read, 152 * Enable the fast-path in percpu_down_read() and percpu_up_read()
173 * see W_R case in the comment above update_fast_ctr(). 153 * but only after another gp pass; this adds the necessary barrier
154 * to ensure the reader can't miss the changes done by us.
174 */ 155 */
175 synchronize_sched_expedited(); 156 rcu_sync_exit(&brw->rss);
176 /* the last writer unblocks update_fast_ctr() */
177 atomic_dec(&brw->write_ctr);
178} 157}
158EXPORT_SYMBOL_GPL(percpu_up_write);
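
With the rcu_sync conversion (and the EXPORT_SYMBOL_GPLs added above) the calling convention is unchanged; a hedged sketch of a hypothetical user protecting read-mostly state:

static struct percpu_rw_semaphore foo_rwsem;
static int foo_value;

static int foo_init(void)
{
        return percpu_init_rwsem(&foo_rwsem);   /* also sets up the rcu_sync state */
}

static int foo_read(void)
{
        int v;

        percpu_down_read(&foo_rwsem);           /* per-cpu fast path while no writer */
        v = foo_value;
        percpu_up_read(&foo_rwsem);
        return v;
}

static void foo_write(int v)
{
        percpu_down_write(&foo_rwsem);          /* rare and slow: waits for a grace period */
        foo_value = v;
        percpu_up_write(&foo_rwsem);
}
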
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index f17a3e3b3550..fec082338668 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
86 /* 86 /*
87 * Put the reader into the wait queue 87 * Put the reader into the wait queue
88 */ 88 */
89 arch_spin_lock(&lock->lock); 89 arch_spin_lock(&lock->wait_lock);
90 90
91 /* 91 /*
92 * The ACQUIRE semantics of the following spinning code ensure 92 * The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
99 /* 99 /*
100 * Signal the next one in queue to become queue head 100 * Signal the next one in queue to become queue head
101 */ 101 */
102 arch_spin_unlock(&lock->lock); 102 arch_spin_unlock(&lock->wait_lock);
103} 103}
104EXPORT_SYMBOL(queued_read_lock_slowpath); 104EXPORT_SYMBOL(queued_read_lock_slowpath);
105 105
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
112 u32 cnts; 112 u32 cnts;
113 113
114 /* Put the writer into the wait queue */ 114 /* Put the writer into the wait queue */
115 arch_spin_lock(&lock->lock); 115 arch_spin_lock(&lock->wait_lock);
116 116
117 /* Try to acquire the lock directly if no reader is present */ 117 /* Try to acquire the lock directly if no reader is present */
118 if (!atomic_read(&lock->cnts) && 118 if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
144 cpu_relax_lowlatency(); 144 cpu_relax_lowlatency();
145 } 145 }
146unlock: 146unlock:
147 arch_spin_unlock(&lock->lock); 147 arch_spin_unlock(&lock->wait_lock);
148} 148}
149EXPORT_SYMBOL(queued_write_lock_slowpath); 149EXPORT_SYMBOL(queued_write_lock_slowpath);
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index c8e6e9a596f5..f0450ff4829b 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -267,7 +267,6 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
267 } 267 }
268 268
269 if (!lp) { /* ONCE */ 269 if (!lp) { /* ONCE */
270 WRITE_ONCE(pn->state, vcpu_hashed);
271 lp = pv_hash(lock, pn); 270 lp = pv_hash(lock, pn);
272 271
273 /* 272 /*
@@ -275,11 +274,9 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
275 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock() 274 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
276 * we'll be sure to be able to observe our hash entry. 275 * we'll be sure to be able to observe our hash entry.
277 * 276 *
278 * [S] pn->state
279 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL 277 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL
280 * MB RMB 278 * MB RMB
281 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash> 279 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash>
282 * [L] pn->state
283 * 280 *
284 * Matches the smp_rmb() in __pv_queued_spin_unlock(). 281 * Matches the smp_rmb() in __pv_queued_spin_unlock().
285 */ 282 */
@@ -364,8 +361,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
364 * vCPU is harmless other than the additional latency in completing 361 * vCPU is harmless other than the additional latency in completing
365 * the unlock. 362 * the unlock.
366 */ 363 */
367 if (READ_ONCE(node->state) == vcpu_hashed) 364 pv_kick(node->cpu);
368 pv_kick(node->cpu);
369} 365}
370/* 366/*
371 * Include the architecture specific callee-save thunk of the 367 * Include the architecture specific callee-save thunk of the
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7781d801212f..8251e75dd9c0 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -74,14 +74,23 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
74 * set up. 74 * set up.
75 */ 75 */
76#ifndef CONFIG_DEBUG_RT_MUTEXES 76#ifndef CONFIG_DEBUG_RT_MUTEXES
77# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) 77# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
78# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
79# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
80
81/*
82 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
83 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
84 * relaxed semantics suffice.
85 */
78static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) 86static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
79{ 87{
80 unsigned long owner, *p = (unsigned long *) &lock->owner; 88 unsigned long owner, *p = (unsigned long *) &lock->owner;
81 89
82 do { 90 do {
83 owner = *p; 91 owner = *p;
84 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); 92 } while (cmpxchg_relaxed(p, owner,
93 owner | RT_MUTEX_HAS_WAITERS) != owner);
85} 94}
86 95
87/* 96/*
@@ -121,11 +130,14 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
121 * lock(wait_lock); 130 * lock(wait_lock);
122 * acquire(lock); 131 * acquire(lock);
123 */ 132 */
124 return rt_mutex_cmpxchg(lock, owner, NULL); 133 return rt_mutex_cmpxchg_release(lock, owner, NULL);
125} 134}
126 135
127#else 136#else
128# define rt_mutex_cmpxchg(l,c,n) (0) 137# define rt_mutex_cmpxchg_relaxed(l,c,n) (0)
138# define rt_mutex_cmpxchg_acquire(l,c,n) (0)
139# define rt_mutex_cmpxchg_release(l,c,n) (0)
140
129static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) 141static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
130{ 142{
131 lock->owner = (struct task_struct *) 143 lock->owner = (struct task_struct *)
@@ -158,7 +170,8 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
158 * then right waiter has a dl_prio() too. 170 * then right waiter has a dl_prio() too.
159 */ 171 */
160 if (dl_prio(left->prio)) 172 if (dl_prio(left->prio))
161 return (left->task->dl.deadline < right->task->dl.deadline); 173 return dl_time_before(left->task->dl.deadline,
174 right->task->dl.deadline);
162 175
163 return 0; 176 return 0;
164} 177}
@@ -1321,7 +1334,7 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
1321 struct hrtimer_sleeper *timeout, 1334 struct hrtimer_sleeper *timeout,
1322 enum rtmutex_chainwalk chwalk)) 1335 enum rtmutex_chainwalk chwalk))
1323{ 1336{
1324 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { 1337 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
1325 rt_mutex_deadlock_account_lock(lock, current); 1338 rt_mutex_deadlock_account_lock(lock, current);
1326 return 0; 1339 return 0;
1327 } else 1340 } else
@@ -1337,7 +1350,7 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
1337 enum rtmutex_chainwalk chwalk)) 1350 enum rtmutex_chainwalk chwalk))
1338{ 1351{
1339 if (chwalk == RT_MUTEX_MIN_CHAINWALK && 1352 if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
1340 likely(rt_mutex_cmpxchg(lock, NULL, current))) { 1353 likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
1341 rt_mutex_deadlock_account_lock(lock, current); 1354 rt_mutex_deadlock_account_lock(lock, current);
1342 return 0; 1355 return 0;
1343 } else 1356 } else
@@ -1348,7 +1361,7 @@ static inline int
1348rt_mutex_fasttrylock(struct rt_mutex *lock, 1361rt_mutex_fasttrylock(struct rt_mutex *lock,
1349 int (*slowfn)(struct rt_mutex *lock)) 1362 int (*slowfn)(struct rt_mutex *lock))
1350{ 1363{
1351 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { 1364 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
1352 rt_mutex_deadlock_account_lock(lock, current); 1365 rt_mutex_deadlock_account_lock(lock, current);
1353 return 1; 1366 return 1;
1354 } 1367 }
@@ -1362,7 +1375,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
1362{ 1375{
1363 WAKE_Q(wake_q); 1376 WAKE_Q(wake_q);
1364 1377
1365 if (likely(rt_mutex_cmpxchg(lock, current, NULL))) { 1378 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
1366 rt_mutex_deadlock_account_unlock(current); 1379 rt_mutex_deadlock_account_unlock(current);
1367 1380
1368 } else { 1381 } else {
@@ -1484,7 +1497,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
1484bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, 1497bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
1485 struct wake_q_head *wqh) 1498 struct wake_q_head *wqh)
1486{ 1499{
1487 if (likely(rt_mutex_cmpxchg(lock, current, NULL))) { 1500 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
1488 rt_mutex_deadlock_account_unlock(current); 1501 rt_mutex_deadlock_account_unlock(current);
1489 return false; 1502 return false;
1490 } 1503 }
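
The acquire/release split above follows the usual lock-ordering rule: the cmpxchg that takes the lock needs ACQUIRE semantics, the one that drops it needs RELEASE, and an RMW done while already holding ->wait_lock can stay relaxed. A self-contained toy analogue using C11 atomics (purely illustrative, not the kernel API):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stddef.h>

        struct toy_mutex {
                _Atomic(void *) owner;          /* NULL when unlocked */
        };

        static bool toy_trylock(struct toy_mutex *m, void *me)
        {
                void *expected = NULL;

                /* ACQUIRE: pairs with the release in toy_unlock(), so the old
                 * owner's critical section is visible before ours begins. */
                return atomic_compare_exchange_strong_explicit(&m->owner, &expected, me,
                                                               memory_order_acquire,
                                                               memory_order_relaxed);
        }

        static bool toy_unlock(struct toy_mutex *m, void *me)
        {
                void *expected = me;

                /* RELEASE: publish our critical section to the next acquirer;
                 * fails (returns false) if a waiter flag changed the word. */
                return atomic_compare_exchange_strong_explicit(&m->owner, &expected, NULL,
                                                               memory_order_release,
                                                               memory_order_relaxed);
        }
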
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 0f189714e457..a4d4de05b2d1 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
262 * to reduce unnecessary expensive cmpxchg() operations. 262 * to reduce unnecessary expensive cmpxchg() operations.
263 */ 263 */
264 if (count == RWSEM_WAITING_BIAS && 264 if (count == RWSEM_WAITING_BIAS &&
265 cmpxchg(&sem->count, RWSEM_WAITING_BIAS, 265 cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
266 RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) { 266 RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
267 if (!list_is_singular(&sem->wait_list)) 267 if (!list_is_singular(&sem->wait_list))
268 rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); 268 rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
@@ -285,7 +285,8 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
285 if (!(count == 0 || count == RWSEM_WAITING_BIAS)) 285 if (!(count == 0 || count == RWSEM_WAITING_BIAS))
286 return false; 286 return false;
287 287
288 old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS); 288 old = cmpxchg_acquire(&sem->count, count,
289 count + RWSEM_ACTIVE_WRITE_BIAS);
289 if (old == count) { 290 if (old == count) {
290 rwsem_set_owner(sem); 291 rwsem_set_owner(sem);
291 return true; 292 return true;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 72b0c66628b6..9d6b55587eaa 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -24,6 +24,16 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
24} 24}
25#endif 25#endif
26 26
27static void *try_ram_remap(resource_size_t offset, size_t size)
28{
29 struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
30
31 /* In the simple case just return the existing linear address */
32 if (!PageHighMem(page))
33 return __va(offset);
34 return NULL; /* fallback to ioremap_cache */
35}
36
27/** 37/**
28 * memremap() - remap an iomem_resource as cacheable memory 38 * memremap() - remap an iomem_resource as cacheable memory
29 * @offset: iomem resource start address 39 * @offset: iomem resource start address
@@ -66,8 +76,8 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
66 * the requested range is potentially in "System RAM" 76 * the requested range is potentially in "System RAM"
67 */ 77 */
68 if (is_ram == REGION_INTERSECTS) 78 if (is_ram == REGION_INTERSECTS)
69 addr = __va(offset); 79 addr = try_ram_remap(offset, size);
70 else 80 if (!addr)
71 addr = ioremap_cache(offset, size); 81 addr = ioremap_cache(offset, size);
72 } 82 }
73 83
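
Hypothetical driver fragment (not part of this patch) showing the memremap() behaviour changed above: a range that intersects "System RAM" and is not highmem now comes back via the linear mapping (__va()) instead of a fresh ioremap_cache() mapping, with ioremap_cache() kept as the fallback:

        #include <linux/io.h>
        #include <linux/ioport.h>

        static void *example_map_region(struct resource *res)
        {
                /* MEMREMAP_WB requests an ordinary cacheable mapping. */
                return memremap(res->start, resource_size(res), MEMREMAP_WB);
        }

        static void example_unmap_region(void *addr)
        {
                memunmap(addr);
        }
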
diff --git a/kernel/module.c b/kernel/module.c
index b86b7bf1be38..8f051a106676 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1063,11 +1063,15 @@ void symbol_put_addr(void *addr)
1063 if (core_kernel_text(a)) 1063 if (core_kernel_text(a))
1064 return; 1064 return;
1065 1065
1066 /* module_text_address is safe here: we're supposed to have reference 1066 /*
 1067 * to module from symbol_get, so it can't go away. */ 1067 * Even though we hold a reference on the module, we still need to
1068 * disable preemption in order to safely traverse the data structure.
1069 */
1070 preempt_disable();
1068 modaddr = __module_text_address(a); 1071 modaddr = __module_text_address(a);
1069 BUG_ON(!modaddr); 1072 BUG_ON(!modaddr);
1070 module_put(modaddr); 1073 module_put(modaddr);
1074 preempt_enable();
1071} 1075}
1072EXPORT_SYMBOL_GPL(symbol_put_addr); 1076EXPORT_SYMBOL_GPL(symbol_put_addr);
1073 1077
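
A hedged sketch of the caller pattern this protects (some_optional_function is a hypothetical symbol provided by another module): the symbol_get() reference keeps that module loaded, but symbol_put_addr() still has to walk module data structures to map the address back to its module, which is what the new preempt_disable()/preempt_enable() pair covers:

        #include <linux/module.h>

        extern int some_optional_function(void);  /* hypothetical, resolved at runtime */

        static void example_use_optional_symbol(void)
        {
                int (*fn)(void) = symbol_get(some_optional_function);

                if (!fn)
                        return;                   /* provider not loaded */
                fn();
                symbol_put_addr((void *)fn);      /* drops the reference from symbol_get() */
        }
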
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 50a808424b06..61a16569ffbf 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,4 +1,4 @@
1obj-y += update.o 1obj-y += update.o sync.o
2obj-$(CONFIG_SRCU) += srcu.o 2obj-$(CONFIG_SRCU) += srcu.o
3obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o 3obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
4obj-$(CONFIG_TREE_RCU) += tree.o 4obj-$(CONFIG_TREE_RCU) += tree.o
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 77192953dee5..d89328e260df 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -252,7 +252,7 @@ struct rcu_torture_ops {
252 void (*exp_sync)(void); 252 void (*exp_sync)(void);
253 unsigned long (*get_state)(void); 253 unsigned long (*get_state)(void);
254 void (*cond_sync)(unsigned long oldstate); 254 void (*cond_sync)(unsigned long oldstate);
255 void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 255 call_rcu_func_t call;
256 void (*cb_barrier)(void); 256 void (*cb_barrier)(void);
257 void (*fqs)(void); 257 void (*fqs)(void);
258 void (*stats)(void); 258 void (*stats)(void);
@@ -448,7 +448,7 @@ static void synchronize_rcu_busted(void)
448} 448}
449 449
450static void 450static void
451call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 451call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
452{ 452{
453 /* This is a deliberate bug for testing purposes only! */ 453 /* This is a deliberate bug for testing purposes only! */
454 func(head); 454 func(head);
@@ -523,7 +523,7 @@ static void srcu_torture_synchronize(void)
523} 523}
524 524
525static void srcu_torture_call(struct rcu_head *head, 525static void srcu_torture_call(struct rcu_head *head,
526 void (*func)(struct rcu_head *head)) 526 rcu_callback_t func)
527{ 527{
528 call_srcu(srcu_ctlp, head, func); 528 call_srcu(srcu_ctlp, head, func);
529} 529}
@@ -695,7 +695,7 @@ static bool __maybe_unused torturing_tasks(void)
695 695
696#define RCUTORTURE_TASKS_OPS 696#define RCUTORTURE_TASKS_OPS
697 697
698static bool torturing_tasks(void) 698static bool __maybe_unused torturing_tasks(void)
699{ 699{
700 return false; 700 return false;
701} 701}
@@ -768,7 +768,6 @@ static int rcu_torture_boost(void *arg)
768 } 768 }
769 call_rcu_time = jiffies; 769 call_rcu_time = jiffies;
770 } 770 }
771 cond_resched_rcu_qs();
772 stutter_wait("rcu_torture_boost"); 771 stutter_wait("rcu_torture_boost");
773 if (torture_must_stop()) 772 if (torture_must_stop())
774 goto checkwait; 773 goto checkwait;
@@ -1208,7 +1207,6 @@ rcu_torture_reader(void *arg)
1208 __this_cpu_inc(rcu_torture_batch[completed]); 1207 __this_cpu_inc(rcu_torture_batch[completed]);
1209 preempt_enable(); 1208 preempt_enable();
1210 cur_ops->readunlock(idx); 1209 cur_ops->readunlock(idx);
1211 cond_resched_rcu_qs();
1212 stutter_wait("rcu_torture_reader"); 1210 stutter_wait("rcu_torture_reader");
1213 } while (!torture_must_stop()); 1211 } while (!torture_must_stop());
1214 if (irqreader && cur_ops->irq_capable) { 1212 if (irqreader && cur_ops->irq_capable) {
@@ -1742,15 +1740,15 @@ rcu_torture_init(void)
1742 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 1740 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1743 pr_alert(" %s", torture_ops[i]->name); 1741 pr_alert(" %s", torture_ops[i]->name);
1744 pr_alert("\n"); 1742 pr_alert("\n");
1745 torture_init_end(); 1743 firsterr = -EINVAL;
1746 return -EINVAL; 1744 goto unwind;
1747 } 1745 }
1748 if (cur_ops->fqs == NULL && fqs_duration != 0) { 1746 if (cur_ops->fqs == NULL && fqs_duration != 0) {
1749 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 1747 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
1750 fqs_duration = 0; 1748 fqs_duration = 0;
1751 } 1749 }
1752 if (cur_ops->init) 1750 if (cur_ops->init)
1753 cur_ops->init(); /* no "goto unwind" prior to this point!!! */ 1751 cur_ops->init();
1754 1752
1755 if (nreaders >= 0) { 1753 if (nreaders >= 0) {
1756 nrealreaders = nreaders; 1754 nrealreaders = nreaders;
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index d3fcb2ec8536..a63a1ea5a41b 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -298,11 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
298 int idx; 298 int idx;
299 299
300 idx = READ_ONCE(sp->completed) & 0x1; 300 idx = READ_ONCE(sp->completed) & 0x1;
301 preempt_disable();
302 __this_cpu_inc(sp->per_cpu_ref->c[idx]); 301 __this_cpu_inc(sp->per_cpu_ref->c[idx]);
303 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 302 smp_mb(); /* B */ /* Avoid leaking the critical section. */
304 __this_cpu_inc(sp->per_cpu_ref->seq[idx]); 303 __this_cpu_inc(sp->per_cpu_ref->seq[idx]);
305 preempt_enable();
306 return idx; 304 return idx;
307} 305}
308EXPORT_SYMBOL_GPL(__srcu_read_lock); 306EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -387,7 +385,7 @@ static void srcu_flip(struct srcu_struct *sp)
387 * srcu_struct structure. 385 * srcu_struct structure.
388 */ 386 */
389void call_srcu(struct srcu_struct *sp, struct rcu_head *head, 387void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
390 void (*func)(struct rcu_head *head)) 388 rcu_callback_t func)
391{ 389{
392 unsigned long flags; 390 unsigned long flags;
393 391
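
Assumed companion pattern (the include/linux/srcu.h side is not shown in this hunk): with the preempt_disable()/preempt_enable() pair removed from __srcu_read_lock(), the outer wrapper is expected to supply it around the call, roughly along these lines:

        #include <linux/srcu.h>
        #include <linux/preempt.h>

        static inline int example_srcu_read_lock(struct srcu_struct *sp)
        {
                int idx;

                preempt_disable();                /* protects the per-CPU counters */
                idx = __srcu_read_lock(sp);
                preempt_enable();
                return idx;
        }
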
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
new file mode 100644
index 000000000000..be922c9f3d37
--- /dev/null
+++ b/kernel/rcu/sync.c
@@ -0,0 +1,223 @@
1/*
2 * RCU-based infrastructure for lightweight reader-writer locking
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (c) 2015, Red Hat, Inc.
19 *
20 * Author: Oleg Nesterov <oleg@redhat.com>
21 */
22
23#include <linux/rcu_sync.h>
24#include <linux/sched.h>
25
26#ifdef CONFIG_PROVE_RCU
27#define __INIT_HELD(func) .held = func,
28#else
29#define __INIT_HELD(func)
30#endif
31
32static const struct {
33 void (*sync)(void);
34 void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
35 void (*wait)(void);
36#ifdef CONFIG_PROVE_RCU
37 int (*held)(void);
38#endif
39} gp_ops[] = {
40 [RCU_SYNC] = {
41 .sync = synchronize_rcu,
42 .call = call_rcu,
43 .wait = rcu_barrier,
44 __INIT_HELD(rcu_read_lock_held)
45 },
46 [RCU_SCHED_SYNC] = {
47 .sync = synchronize_sched,
48 .call = call_rcu_sched,
49 .wait = rcu_barrier_sched,
50 __INIT_HELD(rcu_read_lock_sched_held)
51 },
52 [RCU_BH_SYNC] = {
53 .sync = synchronize_rcu_bh,
54 .call = call_rcu_bh,
55 .wait = rcu_barrier_bh,
56 __INIT_HELD(rcu_read_lock_bh_held)
57 },
58};
59
60enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
61enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
62
63#define rss_lock gp_wait.lock
64
65#ifdef CONFIG_PROVE_RCU
66void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
67{
68 RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
69 "suspicious rcu_sync_is_idle() usage");
70}
71#endif
72
73/**
74 * rcu_sync_init() - Initialize an rcu_sync structure
75 * @rsp: Pointer to rcu_sync structure to be initialized
76 * @type: Flavor of RCU with which to synchronize rcu_sync structure
77 */
78void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
79{
80 memset(rsp, 0, sizeof(*rsp));
81 init_waitqueue_head(&rsp->gp_wait);
82 rsp->gp_type = type;
83}
84
85/**
86 * rcu_sync_enter() - Force readers onto slowpath
87 * @rsp: Pointer to rcu_sync structure to use for synchronization
88 *
89 * This function is used by updaters who need readers to make use of
90 * a slowpath during the update. After this function returns, all
91 * subsequent calls to rcu_sync_is_idle() will return false, which
92 * tells readers to stay off their fastpaths. A later call to
 93 * rcu_sync_exit() re-enables reader fastpaths.
94 *
95 * When called in isolation, rcu_sync_enter() must wait for a grace
96 * period, however, closely spaced calls to rcu_sync_enter() can
97 * optimize away the grace-period wait via a state machine implemented
98 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
99 */
100void rcu_sync_enter(struct rcu_sync *rsp)
101{
102 bool need_wait, need_sync;
103
104 spin_lock_irq(&rsp->rss_lock);
105 need_wait = rsp->gp_count++;
106 need_sync = rsp->gp_state == GP_IDLE;
107 if (need_sync)
108 rsp->gp_state = GP_PENDING;
109 spin_unlock_irq(&rsp->rss_lock);
110
111 BUG_ON(need_wait && need_sync);
112
113 if (need_sync) {
114 gp_ops[rsp->gp_type].sync();
115 rsp->gp_state = GP_PASSED;
116 wake_up_all(&rsp->gp_wait);
117 } else if (need_wait) {
118 wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
119 } else {
120 /*
121 * Possible when there's a pending CB from a rcu_sync_exit().
122 * Nobody has yet been allowed the 'fast' path and thus we can
123 * avoid doing any sync(). The callback will get 'dropped'.
124 */
125 BUG_ON(rsp->gp_state != GP_PASSED);
126 }
127}
128
129/**
130 * rcu_sync_func() - Callback function managing reader access to fastpath
131 * @rsp: Pointer to rcu_sync structure to use for synchronization
132 *
133 * This function is passed to one of the call_rcu() functions by
 134 * rcu_sync_exit(), so that it is invoked after a grace period following
 135 * that invocation of rcu_sync_exit(). It takes action based on events that
136 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
137 * and rcu_sync_exit() pairs need not wait for a grace period.
138 *
139 * If another rcu_sync_enter() is invoked before the grace period
140 * ended, reset state to allow the next rcu_sync_exit() to let the
141 * readers back onto their fastpaths (after a grace period). If both
142 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
143 * before the grace period ended, re-invoke call_rcu() on behalf of that
144 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
145 * can again use their fastpaths.
146 */
147static void rcu_sync_func(struct rcu_head *rcu)
148{
149 struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
150 unsigned long flags;
151
152 BUG_ON(rsp->gp_state != GP_PASSED);
153 BUG_ON(rsp->cb_state == CB_IDLE);
154
155 spin_lock_irqsave(&rsp->rss_lock, flags);
156 if (rsp->gp_count) {
157 /*
 158 * A new rcu_sync_enter() has happened; drop the callback.
159 */
160 rsp->cb_state = CB_IDLE;
161 } else if (rsp->cb_state == CB_REPLAY) {
162 /*
163 * A new rcu_sync_exit() has happened; requeue the callback
164 * to catch a later GP.
165 */
166 rsp->cb_state = CB_PENDING;
167 gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
168 } else {
169 /*
 170 * We're at least a GP after rcu_sync_exit(); everybody will now
 171 * have observed the write-side critical section. Let 'em rip!
172 */
173 rsp->cb_state = CB_IDLE;
174 rsp->gp_state = GP_IDLE;
175 }
176 spin_unlock_irqrestore(&rsp->rss_lock, flags);
177}
178
179/**
 180 * rcu_sync_exit() - Allow readers back onto fast path after grace period
181 * @rsp: Pointer to rcu_sync structure to use for synchronization
182 *
183 * This function is used by updaters who have completed, and can therefore
184 * now allow readers to make use of their fastpaths after a grace period
185 * has elapsed. After this grace period has completed, all subsequent
186 * calls to rcu_sync_is_idle() will return true, which tells readers that
187 * they can once again use their fastpaths.
188 */
189void rcu_sync_exit(struct rcu_sync *rsp)
190{
191 spin_lock_irq(&rsp->rss_lock);
192 if (!--rsp->gp_count) {
193 if (rsp->cb_state == CB_IDLE) {
194 rsp->cb_state = CB_PENDING;
195 gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
196 } else if (rsp->cb_state == CB_PENDING) {
197 rsp->cb_state = CB_REPLAY;
198 }
199 }
200 spin_unlock_irq(&rsp->rss_lock);
201}
202
203/**
204 * rcu_sync_dtor() - Clean up an rcu_sync structure
205 * @rsp: Pointer to rcu_sync structure to be cleaned up
206 */
207void rcu_sync_dtor(struct rcu_sync *rsp)
208{
209 int cb_state;
210
211 BUG_ON(rsp->gp_count);
212
213 spin_lock_irq(&rsp->rss_lock);
214 if (rsp->cb_state == CB_REPLAY)
215 rsp->cb_state = CB_PENDING;
216 cb_state = rsp->cb_state;
217 spin_unlock_irq(&rsp->rss_lock);
218
219 if (cb_state != CB_IDLE) {
220 gp_ops[rsp->gp_type].wait();
221 BUG_ON(rsp->cb_state != CB_IDLE);
222 }
223}
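
A minimal usage sketch of the new rcu_sync facility (all names here are illustrative, and the writer side is simplified -- it only toggles the fastpath, it does not wait for or drain slow readers the way the percpu_rw_semaphore conversion above does). Readers stay on a per-CPU fastpath while rcu_sync_is_idle() holds; a writer forces them onto a shared slowpath for the duration of the update:

        #include <linux/rcu_sync.h>
        #include <linux/rcupdate.h>
        #include <linux/percpu.h>
        #include <linux/atomic.h>

        static struct rcu_sync example_rss;
        static DEFINE_PER_CPU(int, example_fast_ctr);
        static atomic_t example_slow_ctr = ATOMIC_INIT(0);

        static void example_init(void)
        {
                rcu_sync_init(&example_rss, RCU_SCHED_SYNC);
        }

        static void example_reader(void)
        {
                rcu_read_lock_sched();
                if (rcu_sync_is_idle(&example_rss))
                        __this_cpu_inc(example_fast_ctr);  /* no writer: fastpath */
                else
                        atomic_inc(&example_slow_ctr);     /* writer active: slowpath */
                rcu_read_unlock_sched();
        }

        static void example_writer(void (*update)(void))
        {
                rcu_sync_enter(&example_rss);  /* after a GP, all readers go slow */
                update();
                rcu_sync_exit(&example_rss);   /* fastpath re-enabled one GP later */
        }
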
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index d0471056d0af..944b1b491ed8 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -44,7 +44,7 @@ struct rcu_ctrlblk;
44static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); 44static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
45static void rcu_process_callbacks(struct softirq_action *unused); 45static void rcu_process_callbacks(struct softirq_action *unused);
46static void __call_rcu(struct rcu_head *head, 46static void __call_rcu(struct rcu_head *head,
47 void (*func)(struct rcu_head *rcu), 47 rcu_callback_t func,
48 struct rcu_ctrlblk *rcp); 48 struct rcu_ctrlblk *rcp);
49 49
50#include "tiny_plugin.h" 50#include "tiny_plugin.h"
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
203 * Helper function for call_rcu() and call_rcu_bh(). 203 * Helper function for call_rcu() and call_rcu_bh().
204 */ 204 */
205static void __call_rcu(struct rcu_head *head, 205static void __call_rcu(struct rcu_head *head,
206 void (*func)(struct rcu_head *rcu), 206 rcu_callback_t func,
207 struct rcu_ctrlblk *rcp) 207 struct rcu_ctrlblk *rcp)
208{ 208{
209 unsigned long flags; 209 unsigned long flags;
@@ -229,7 +229,7 @@ static void __call_rcu(struct rcu_head *head,
229 * period. But since we have but one CPU, that would be after any 229 * period. But since we have but one CPU, that would be after any
230 * quiescent state. 230 * quiescent state.
231 */ 231 */
232void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 232void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
233{ 233{
234 __call_rcu(head, func, &rcu_sched_ctrlblk); 234 __call_rcu(head, func, &rcu_sched_ctrlblk);
235} 235}
@@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
239 * Post an RCU bottom-half callback to be invoked after any subsequent 239 * Post an RCU bottom-half callback to be invoked after any subsequent
240 * quiescent state. 240 * quiescent state.
241 */ 241 */
242void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 242void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
243{ 243{
244 __call_rcu(head, func, &rcu_bh_ctrlblk); 244 __call_rcu(head, func, &rcu_bh_ctrlblk);
245} 245}
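
An illustrative callback matching the new rcu_callback_t typedef, i.e. void (*)(struct rcu_head *); the typedef only shortens the existing signatures and does not change behaviour (example_obj and its helpers are hypothetical):

        #include <linux/kernel.h>
        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct example_obj {
                int payload;
                struct rcu_head rh;
        };

        static void example_free_cb(struct rcu_head *head)
        {
                kfree(container_of(head, struct example_obj, rh));
        }

        static void example_retire(struct example_obj *obj)
        {
                call_rcu_sched(&obj->rh, example_free_cb);  /* rcu_callback_t argument */
        }
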
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 775d36cc0050..f07343b54fe5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -71,7 +71,6 @@ MODULE_ALIAS("rcutree");
71static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 71static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
72static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 72static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
73static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS]; 73static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
74static struct lock_class_key rcu_exp_sched_class[RCU_NUM_LVLS];
75 74
76/* 75/*
77 * In order to export the rcu_state name to the tracing tools, it 76 * In order to export the rcu_state name to the tracing tools, it
@@ -98,7 +97,7 @@ struct rcu_state sname##_state = { \
98 .level = { &sname##_state.node[0] }, \ 97 .level = { &sname##_state.node[0] }, \
99 .rda = &sname##_data, \ 98 .rda = &sname##_data, \
100 .call = cr, \ 99 .call = cr, \
101 .fqs_state = RCU_GP_IDLE, \ 100 .gp_state = RCU_GP_IDLE, \
102 .gpnum = 0UL - 300UL, \ 101 .gpnum = 0UL - 300UL, \
103 .completed = 0UL - 300UL, \ 102 .completed = 0UL - 300UL, \
104 .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \ 103 .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
@@ -161,6 +160,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
161static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 160static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
162static void invoke_rcu_core(void); 161static void invoke_rcu_core(void);
163static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 162static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
163static void rcu_report_exp_rdp(struct rcu_state *rsp,
164 struct rcu_data *rdp, bool wake);
164 165
165/* rcuc/rcub kthread realtime priority */ 166/* rcuc/rcub kthread realtime priority */
166#ifdef CONFIG_RCU_KTHREAD_PRIO 167#ifdef CONFIG_RCU_KTHREAD_PRIO
@@ -245,21 +246,33 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
245 */ 246 */
246void rcu_sched_qs(void) 247void rcu_sched_qs(void)
247{ 248{
248 if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) { 249 unsigned long flags;
250
251 if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
249 trace_rcu_grace_period(TPS("rcu_sched"), 252 trace_rcu_grace_period(TPS("rcu_sched"),
250 __this_cpu_read(rcu_sched_data.gpnum), 253 __this_cpu_read(rcu_sched_data.gpnum),
251 TPS("cpuqs")); 254 TPS("cpuqs"));
252 __this_cpu_write(rcu_sched_data.passed_quiesce, 1); 255 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
256 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
257 return;
258 local_irq_save(flags);
259 if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
260 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
261 rcu_report_exp_rdp(&rcu_sched_state,
262 this_cpu_ptr(&rcu_sched_data),
263 true);
264 }
265 local_irq_restore(flags);
253 } 266 }
254} 267}
255 268
256void rcu_bh_qs(void) 269void rcu_bh_qs(void)
257{ 270{
258 if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) { 271 if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
259 trace_rcu_grace_period(TPS("rcu_bh"), 272 trace_rcu_grace_period(TPS("rcu_bh"),
260 __this_cpu_read(rcu_bh_data.gpnum), 273 __this_cpu_read(rcu_bh_data.gpnum),
261 TPS("cpuqs")); 274 TPS("cpuqs"));
262 __this_cpu_write(rcu_bh_data.passed_quiesce, 1); 275 __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
263 } 276 }
264} 277}
265 278
@@ -337,12 +350,14 @@ static void rcu_momentary_dyntick_idle(void)
337 */ 350 */
338void rcu_note_context_switch(void) 351void rcu_note_context_switch(void)
339{ 352{
353 barrier(); /* Avoid RCU read-side critical sections leaking down. */
340 trace_rcu_utilization(TPS("Start context switch")); 354 trace_rcu_utilization(TPS("Start context switch"));
341 rcu_sched_qs(); 355 rcu_sched_qs();
342 rcu_preempt_note_context_switch(); 356 rcu_preempt_note_context_switch();
343 if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) 357 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
344 rcu_momentary_dyntick_idle(); 358 rcu_momentary_dyntick_idle();
345 trace_rcu_utilization(TPS("End context switch")); 359 trace_rcu_utilization(TPS("End context switch"));
360 barrier(); /* Avoid RCU read-side critical sections leaking up. */
346} 361}
347EXPORT_SYMBOL_GPL(rcu_note_context_switch); 362EXPORT_SYMBOL_GPL(rcu_note_context_switch);
348 363
@@ -353,12 +368,19 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
353 * RCU flavors in desperate need of a quiescent state, which will normally 368 * RCU flavors in desperate need of a quiescent state, which will normally
354 * be none of them). Either way, do a lightweight quiescent state for 369 * be none of them). Either way, do a lightweight quiescent state for
355 * all RCU flavors. 370 * all RCU flavors.
371 *
372 * The barrier() calls are redundant in the common case when this is
 373 * called externally, but are needed just in case this is called from
 374 * within this file.
375 *
356 */ 376 */
357void rcu_all_qs(void) 377void rcu_all_qs(void)
358{ 378{
379 barrier(); /* Avoid RCU read-side critical sections leaking down. */
359 if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) 380 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
360 rcu_momentary_dyntick_idle(); 381 rcu_momentary_dyntick_idle();
361 this_cpu_inc(rcu_qs_ctr); 382 this_cpu_inc(rcu_qs_ctr);
383 barrier(); /* Avoid RCU read-side critical sections leaking up. */
362} 384}
363EXPORT_SYMBOL_GPL(rcu_all_qs); 385EXPORT_SYMBOL_GPL(rcu_all_qs);
364 386
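
Hypothetical caller of rcu_all_qs() on a tree-RCU kernel (names are illustrative), showing what the new barrier() pair guards against: the compiler must not slide statements from the adjacent read-side critical sections across the reported quiescent state:

        #include <linux/rcupdate.h>
        #include <linux/rcutree.h>

        static void example_scan(void (*process)(int batch), int nbatches)
        {
                int batch;

                for (batch = 0; batch < nbatches; batch++) {
                        rcu_read_lock_sched();
                        process(batch);          /* read-side critical section */
                        rcu_read_unlock_sched();
                        rcu_all_qs();            /* report a quiescent state between batches */
                }
        }
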
@@ -1744,9 +1766,9 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1744 */ 1766 */
1745 rdp->gpnum = rnp->gpnum; 1767 rdp->gpnum = rnp->gpnum;
1746 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); 1768 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1747 rdp->passed_quiesce = 0; 1769 rdp->cpu_no_qs.b.norm = true;
1748 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); 1770 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1749 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); 1771 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1750 zero_cpu_stall_ticks(rdp); 1772 zero_cpu_stall_ticks(rdp);
1751 WRITE_ONCE(rdp->gpwrap, false); 1773 WRITE_ONCE(rdp->gpwrap, false);
1752 } 1774 }
@@ -1927,16 +1949,15 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
1927/* 1949/*
1928 * Do one round of quiescent-state forcing. 1950 * Do one round of quiescent-state forcing.
1929 */ 1951 */
1930static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) 1952static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
1931{ 1953{
1932 int fqs_state = fqs_state_in;
1933 bool isidle = false; 1954 bool isidle = false;
1934 unsigned long maxj; 1955 unsigned long maxj;
1935 struct rcu_node *rnp = rcu_get_root(rsp); 1956 struct rcu_node *rnp = rcu_get_root(rsp);
1936 1957
1937 WRITE_ONCE(rsp->gp_activity, jiffies); 1958 WRITE_ONCE(rsp->gp_activity, jiffies);
1938 rsp->n_force_qs++; 1959 rsp->n_force_qs++;
1939 if (fqs_state == RCU_SAVE_DYNTICK) { 1960 if (first_time) {
1940 /* Collect dyntick-idle snapshots. */ 1961 /* Collect dyntick-idle snapshots. */
1941 if (is_sysidle_rcu_state(rsp)) { 1962 if (is_sysidle_rcu_state(rsp)) {
1942 isidle = true; 1963 isidle = true;
@@ -1945,7 +1966,6 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1945 force_qs_rnp(rsp, dyntick_save_progress_counter, 1966 force_qs_rnp(rsp, dyntick_save_progress_counter,
1946 &isidle, &maxj); 1967 &isidle, &maxj);
1947 rcu_sysidle_report_gp(rsp, isidle, maxj); 1968 rcu_sysidle_report_gp(rsp, isidle, maxj);
1948 fqs_state = RCU_FORCE_QS;
1949 } else { 1969 } else {
1950 /* Handle dyntick-idle and offline CPUs. */ 1970 /* Handle dyntick-idle and offline CPUs. */
1951 isidle = true; 1971 isidle = true;
@@ -1959,7 +1979,6 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1959 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS); 1979 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
1960 raw_spin_unlock_irq(&rnp->lock); 1980 raw_spin_unlock_irq(&rnp->lock);
1961 } 1981 }
1962 return fqs_state;
1963} 1982}
1964 1983
1965/* 1984/*
@@ -2023,7 +2042,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
2023 /* Declare grace period done. */ 2042 /* Declare grace period done. */
2024 WRITE_ONCE(rsp->completed, rsp->gpnum); 2043 WRITE_ONCE(rsp->completed, rsp->gpnum);
2025 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); 2044 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
2026 rsp->fqs_state = RCU_GP_IDLE; 2045 rsp->gp_state = RCU_GP_IDLE;
2027 rdp = this_cpu_ptr(rsp->rda); 2046 rdp = this_cpu_ptr(rsp->rda);
2028 /* Advance CBs to reduce false positives below. */ 2047 /* Advance CBs to reduce false positives below. */
2029 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; 2048 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
@@ -2041,7 +2060,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
2041 */ 2060 */
2042static int __noreturn rcu_gp_kthread(void *arg) 2061static int __noreturn rcu_gp_kthread(void *arg)
2043{ 2062{
2044 int fqs_state; 2063 bool first_gp_fqs;
2045 int gf; 2064 int gf;
2046 unsigned long j; 2065 unsigned long j;
2047 int ret; 2066 int ret;
@@ -2073,7 +2092,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
2073 } 2092 }
2074 2093
2075 /* Handle quiescent-state forcing. */ 2094 /* Handle quiescent-state forcing. */
2076 fqs_state = RCU_SAVE_DYNTICK; 2095 first_gp_fqs = true;
2077 j = jiffies_till_first_fqs; 2096 j = jiffies_till_first_fqs;
2078 if (j > HZ) { 2097 if (j > HZ) {
2079 j = HZ; 2098 j = HZ;
@@ -2101,7 +2120,8 @@ static int __noreturn rcu_gp_kthread(void *arg)
2101 trace_rcu_grace_period(rsp->name, 2120 trace_rcu_grace_period(rsp->name,
2102 READ_ONCE(rsp->gpnum), 2121 READ_ONCE(rsp->gpnum),
2103 TPS("fqsstart")); 2122 TPS("fqsstart"));
2104 fqs_state = rcu_gp_fqs(rsp, fqs_state); 2123 rcu_gp_fqs(rsp, first_gp_fqs);
2124 first_gp_fqs = false;
2105 trace_rcu_grace_period(rsp->name, 2125 trace_rcu_grace_period(rsp->name,
2106 READ_ONCE(rsp->gpnum), 2126 READ_ONCE(rsp->gpnum),
2107 TPS("fqsend")); 2127 TPS("fqsend"));
@@ -2337,7 +2357,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2337 rnp = rdp->mynode; 2357 rnp = rdp->mynode;
2338 raw_spin_lock_irqsave(&rnp->lock, flags); 2358 raw_spin_lock_irqsave(&rnp->lock, flags);
2339 smp_mb__after_unlock_lock(); 2359 smp_mb__after_unlock_lock();
2340 if ((rdp->passed_quiesce == 0 && 2360 if ((rdp->cpu_no_qs.b.norm &&
2341 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) || 2361 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2342 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum || 2362 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2343 rdp->gpwrap) { 2363 rdp->gpwrap) {
@@ -2348,7 +2368,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2348 * We will instead need a new quiescent state that lies 2368 * We will instead need a new quiescent state that lies
2349 * within the current grace period. 2369 * within the current grace period.
2350 */ 2370 */
2351 rdp->passed_quiesce = 0; /* need qs for new gp. */ 2371 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2352 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); 2372 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
2353 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2373 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2354 return; 2374 return;
@@ -2357,7 +2377,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2357 if ((rnp->qsmask & mask) == 0) { 2377 if ((rnp->qsmask & mask) == 0) {
2358 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2378 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2359 } else { 2379 } else {
2360 rdp->qs_pending = 0; 2380 rdp->core_needs_qs = 0;
2361 2381
2362 /* 2382 /*
2363 * This GP can't end until cpu checks in, so all of our 2383 * This GP can't end until cpu checks in, so all of our
@@ -2388,14 +2408,14 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2388 * Does this CPU still need to do its part for current grace period? 2408 * Does this CPU still need to do its part for current grace period?
2389 * If no, return and let the other CPUs do their part as well. 2409 * If no, return and let the other CPUs do their part as well.
2390 */ 2410 */
2391 if (!rdp->qs_pending) 2411 if (!rdp->core_needs_qs)
2392 return; 2412 return;
2393 2413
2394 /* 2414 /*
2395 * Was there a quiescent state since the beginning of the grace 2415 * Was there a quiescent state since the beginning of the grace
2396 * period? If no, then exit and wait for the next call. 2416 * period? If no, then exit and wait for the next call.
2397 */ 2417 */
2398 if (!rdp->passed_quiesce && 2418 if (rdp->cpu_no_qs.b.norm &&
2399 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) 2419 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
2400 return; 2420 return;
2401 2421
@@ -3017,7 +3037,7 @@ static void rcu_leak_callback(struct rcu_head *rhp)
3017 * is expected to specify a CPU. 3037 * is expected to specify a CPU.
3018 */ 3038 */
3019static void 3039static void
3020__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), 3040__call_rcu(struct rcu_head *head, rcu_callback_t func,
3021 struct rcu_state *rsp, int cpu, bool lazy) 3041 struct rcu_state *rsp, int cpu, bool lazy)
3022{ 3042{
3023 unsigned long flags; 3043 unsigned long flags;
@@ -3088,7 +3108,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
3088/* 3108/*
3089 * Queue an RCU-sched callback for invocation after a grace period. 3109 * Queue an RCU-sched callback for invocation after a grace period.
3090 */ 3110 */
3091void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 3111void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
3092{ 3112{
3093 __call_rcu(head, func, &rcu_sched_state, -1, 0); 3113 __call_rcu(head, func, &rcu_sched_state, -1, 0);
3094} 3114}
@@ -3097,7 +3117,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
3097/* 3117/*
3098 * Queue an RCU callback for invocation after a quicker grace period. 3118 * Queue an RCU callback for invocation after a quicker grace period.
3099 */ 3119 */
3100void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 3120void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
3101{ 3121{
3102 __call_rcu(head, func, &rcu_bh_state, -1, 0); 3122 __call_rcu(head, func, &rcu_bh_state, -1, 0);
3103} 3123}
@@ -3111,7 +3131,7 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
3111 * function may only be called from __kfree_rcu(). 3131 * function may only be called from __kfree_rcu().
3112 */ 3132 */
3113void kfree_call_rcu(struct rcu_head *head, 3133void kfree_call_rcu(struct rcu_head *head,
3114 void (*func)(struct rcu_head *rcu)) 3134 rcu_callback_t func)
3115{ 3135{
3116 __call_rcu(head, func, rcu_state_p, -1, 1); 3136 __call_rcu(head, func, rcu_state_p, -1, 1);
3117} 3137}
@@ -3379,6 +3399,191 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
3379 return rcu_seq_done(&rsp->expedited_sequence, s); 3399 return rcu_seq_done(&rsp->expedited_sequence, s);
3380} 3400}
3381 3401
3402/*
3403 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
3404 * recent CPU-online activity. Note that these masks are not cleared
3405 * when CPUs go offline, so they reflect the union of all CPUs that have
3406 * ever been online. This means that this function normally takes its
3407 * no-work-to-do fastpath.
3408 */
3409static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
3410{
3411 bool done;
3412 unsigned long flags;
3413 unsigned long mask;
3414 unsigned long oldmask;
3415 int ncpus = READ_ONCE(rsp->ncpus);
3416 struct rcu_node *rnp;
3417 struct rcu_node *rnp_up;
3418
3419 /* If no new CPUs onlined since last time, nothing to do. */
3420 if (likely(ncpus == rsp->ncpus_snap))
3421 return;
3422 rsp->ncpus_snap = ncpus;
3423
3424 /*
3425 * Each pass through the following loop propagates newly onlined
3426 * CPUs for the current rcu_node structure up the rcu_node tree.
3427 */
3428 rcu_for_each_leaf_node(rsp, rnp) {
3429 raw_spin_lock_irqsave(&rnp->lock, flags);
3430 smp_mb__after_unlock_lock();
3431 if (rnp->expmaskinit == rnp->expmaskinitnext) {
3432 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3433 continue; /* No new CPUs, nothing to do. */
3434 }
3435
3436 /* Update this node's mask, track old value for propagation. */
3437 oldmask = rnp->expmaskinit;
3438 rnp->expmaskinit = rnp->expmaskinitnext;
3439 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3440
3441 /* If was already nonzero, nothing to propagate. */
3442 if (oldmask)
3443 continue;
3444
3445 /* Propagate the new CPU up the tree. */
3446 mask = rnp->grpmask;
3447 rnp_up = rnp->parent;
3448 done = false;
3449 while (rnp_up) {
3450 raw_spin_lock_irqsave(&rnp_up->lock, flags);
3451 smp_mb__after_unlock_lock();
3452 if (rnp_up->expmaskinit)
3453 done = true;
3454 rnp_up->expmaskinit |= mask;
3455 raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
3456 if (done)
3457 break;
3458 mask = rnp_up->grpmask;
3459 rnp_up = rnp_up->parent;
3460 }
3461 }
3462}
3463
3464/*
3465 * Reset the ->expmask values in the rcu_node tree in preparation for
3466 * a new expedited grace period.
3467 */
3468static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
3469{
3470 unsigned long flags;
3471 struct rcu_node *rnp;
3472
3473 sync_exp_reset_tree_hotplug(rsp);
3474 rcu_for_each_node_breadth_first(rsp, rnp) {
3475 raw_spin_lock_irqsave(&rnp->lock, flags);
3476 smp_mb__after_unlock_lock();
3477 WARN_ON_ONCE(rnp->expmask);
3478 rnp->expmask = rnp->expmaskinit;
3479 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3480 }
3481}
3482
3483/*
3484 * Return non-zero if there is no RCU expedited grace period in progress
3485 * for the specified rcu_node structure, in other words, if all CPUs and
3486 * tasks covered by the specified rcu_node structure have done their bit
3487 * for the current expedited grace period. Works only for preemptible
 3488 * RCU -- other RCU implementations use other means.
3489 *
3490 * Caller must hold the root rcu_node's exp_funnel_mutex.
3491 */
3492static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
3493{
3494 return rnp->exp_tasks == NULL &&
3495 READ_ONCE(rnp->expmask) == 0;
3496}
3497
3498/*
3499 * Report the exit from RCU read-side critical section for the last task
3500 * that queued itself during or before the current expedited preemptible-RCU
3501 * grace period. This event is reported either to the rcu_node structure on
3502 * which the task was queued or to one of that rcu_node structure's ancestors,
3503 * recursively up the tree. (Calm down, calm down, we do the recursion
3504 * iteratively!)
3505 *
3506 * Caller must hold the root rcu_node's exp_funnel_mutex and the
3507 * specified rcu_node structure's ->lock.
3508 */
3509static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
3510 bool wake, unsigned long flags)
3511 __releases(rnp->lock)
3512{
3513 unsigned long mask;
3514
3515 for (;;) {
3516 if (!sync_rcu_preempt_exp_done(rnp)) {
3517 if (!rnp->expmask)
3518 rcu_initiate_boost(rnp, flags);
3519 else
3520 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3521 break;
3522 }
3523 if (rnp->parent == NULL) {
3524 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3525 if (wake) {
3526 smp_mb(); /* EGP done before wake_up(). */
3527 wake_up(&rsp->expedited_wq);
3528 }
3529 break;
3530 }
3531 mask = rnp->grpmask;
3532 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
3533 rnp = rnp->parent;
3534 raw_spin_lock(&rnp->lock); /* irqs already disabled */
3535 smp_mb__after_unlock_lock();
3536 WARN_ON_ONCE(!(rnp->expmask & mask));
3537 rnp->expmask &= ~mask;
3538 }
3539}
3540
3541/*
3542 * Report expedited quiescent state for specified node. This is a
3543 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
3544 *
3545 * Caller must hold the root rcu_node's exp_funnel_mutex.
3546 */
3547static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
3548 struct rcu_node *rnp, bool wake)
3549{
3550 unsigned long flags;
3551
3552 raw_spin_lock_irqsave(&rnp->lock, flags);
3553 smp_mb__after_unlock_lock();
3554 __rcu_report_exp_rnp(rsp, rnp, wake, flags);
3555}
3556
3557/*
3558 * Report expedited quiescent state for multiple CPUs, all covered by the
3559 * specified leaf rcu_node structure. Caller must hold the root
3560 * rcu_node's exp_funnel_mutex.
3561 */
3562static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
3563 unsigned long mask, bool wake)
3564{
3565 unsigned long flags;
3566
3567 raw_spin_lock_irqsave(&rnp->lock, flags);
3568 smp_mb__after_unlock_lock();
3569 if (!(rnp->expmask & mask)) {
3570 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3571 return;
3572 }
3573 rnp->expmask &= ~mask;
3574 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
3575}
3576
3577/*
3578 * Report expedited quiescent state for specified rcu_data (CPU).
3579 * Caller must hold the root rcu_node's exp_funnel_mutex.
3580 */
3581static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
3582 bool wake)
3583{
3584 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
3585}
3586
3382/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */ 3587/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
3383static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp, 3588static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
3384 struct rcu_data *rdp, 3589 struct rcu_data *rdp,
@@ -3455,16 +3660,111 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
3455} 3660}
3456 3661
3457/* Invoked on each online non-idle CPU for expedited quiescent state. */ 3662/* Invoked on each online non-idle CPU for expedited quiescent state. */
3458static int synchronize_sched_expedited_cpu_stop(void *data) 3663static void sync_sched_exp_handler(void *data)
3459{ 3664{
3460 struct rcu_data *rdp = data; 3665 struct rcu_data *rdp;
3461 struct rcu_state *rsp = rdp->rsp; 3666 struct rcu_node *rnp;
3667 struct rcu_state *rsp = data;
3462 3668
3463 /* We are here: If we are last, do the wakeup. */ 3669 rdp = this_cpu_ptr(rsp->rda);
3464 rdp->exp_done = true; 3670 rnp = rdp->mynode;
3465 if (atomic_dec_and_test(&rsp->expedited_need_qs)) 3671 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
3466 wake_up(&rsp->expedited_wq); 3672 __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
3467 return 0; 3673 return;
3674 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
3675 resched_cpu(smp_processor_id());
3676}
3677
3678/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
3679static void sync_sched_exp_online_cleanup(int cpu)
3680{
3681 struct rcu_data *rdp;
3682 int ret;
3683 struct rcu_node *rnp;
3684 struct rcu_state *rsp = &rcu_sched_state;
3685
3686 rdp = per_cpu_ptr(rsp->rda, cpu);
3687 rnp = rdp->mynode;
3688 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
3689 return;
3690 ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
3691 WARN_ON_ONCE(ret);
3692}
3693
3694/*
3695 * Select the nodes that the upcoming expedited grace period needs
3696 * to wait for.
3697 */
3698static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
3699 smp_call_func_t func)
3700{
3701 int cpu;
3702 unsigned long flags;
3703 unsigned long mask;
3704 unsigned long mask_ofl_test;
3705 unsigned long mask_ofl_ipi;
3706 int ret;
3707 struct rcu_node *rnp;
3708
3709 sync_exp_reset_tree(rsp);
3710 rcu_for_each_leaf_node(rsp, rnp) {
3711 raw_spin_lock_irqsave(&rnp->lock, flags);
3712 smp_mb__after_unlock_lock();
3713
3714 /* Each pass checks a CPU for identity, offline, and idle. */
3715 mask_ofl_test = 0;
3716 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
3717 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3718 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
3719
3720 if (raw_smp_processor_id() == cpu ||
3721 !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
3722 mask_ofl_test |= rdp->grpmask;
3723 }
3724 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
3725
3726 /*
3727 * Need to wait for any blocked tasks as well. Note that
3728 * additional blocking tasks will also block the expedited
3729 * GP until such time as the ->expmask bits are cleared.
3730 */
3731 if (rcu_preempt_has_tasks(rnp))
3732 rnp->exp_tasks = rnp->blkd_tasks.next;
3733 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3734
3735 /* IPI the remaining CPUs for expedited quiescent state. */
3736 mask = 1;
3737 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
3738 if (!(mask_ofl_ipi & mask))
3739 continue;
3740retry_ipi:
3741 ret = smp_call_function_single(cpu, func, rsp, 0);
3742 if (!ret) {
3743 mask_ofl_ipi &= ~mask;
3744 } else {
3745 /* Failed, raced with offline. */
3746 raw_spin_lock_irqsave(&rnp->lock, flags);
3747 if (cpu_online(cpu) &&
3748 (rnp->expmask & mask)) {
3749 raw_spin_unlock_irqrestore(&rnp->lock,
3750 flags);
3751 schedule_timeout_uninterruptible(1);
3752 if (cpu_online(cpu) &&
3753 (rnp->expmask & mask))
3754 goto retry_ipi;
3755 raw_spin_lock_irqsave(&rnp->lock,
3756 flags);
3757 }
3758 if (!(rnp->expmask & mask))
3759 mask_ofl_ipi &= ~mask;
3760 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3761 }
3762 }
3763 /* Report quiescent states for those that went offline. */
3764 mask_ofl_test |= mask_ofl_ipi;
3765 if (mask_ofl_test)
3766 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
3767 }
3468} 3768}
3469 3769
3470static void synchronize_sched_expedited_wait(struct rcu_state *rsp) 3770static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
@@ -3472,7 +3772,9 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3472 int cpu; 3772 int cpu;
3473 unsigned long jiffies_stall; 3773 unsigned long jiffies_stall;
3474 unsigned long jiffies_start; 3774 unsigned long jiffies_start;
3475 struct rcu_data *rdp; 3775 unsigned long mask;
3776 struct rcu_node *rnp;
3777 struct rcu_node *rnp_root = rcu_get_root(rsp);
3476 int ret; 3778 int ret;
3477 3779
3478 jiffies_stall = rcu_jiffies_till_stall_check(); 3780 jiffies_stall = rcu_jiffies_till_stall_check();
@@ -3481,33 +3783,43 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3481 for (;;) { 3783 for (;;) {
3482 ret = wait_event_interruptible_timeout( 3784 ret = wait_event_interruptible_timeout(
3483 rsp->expedited_wq, 3785 rsp->expedited_wq,
3484 !atomic_read(&rsp->expedited_need_qs), 3786 sync_rcu_preempt_exp_done(rnp_root),
3485 jiffies_stall); 3787 jiffies_stall);
3486 if (ret > 0) 3788 if (ret > 0)
3487 return; 3789 return;
3488 if (ret < 0) { 3790 if (ret < 0) {
3489 /* Hit a signal, disable CPU stall warnings. */ 3791 /* Hit a signal, disable CPU stall warnings. */
3490 wait_event(rsp->expedited_wq, 3792 wait_event(rsp->expedited_wq,
3491 !atomic_read(&rsp->expedited_need_qs)); 3793 sync_rcu_preempt_exp_done(rnp_root));
3492 return; 3794 return;
3493 } 3795 }
3494 pr_err("INFO: %s detected expedited stalls on CPUs: {", 3796 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
3495 rsp->name); 3797 rsp->name);
3496 for_each_online_cpu(cpu) { 3798 rcu_for_each_leaf_node(rsp, rnp) {
3497 rdp = per_cpu_ptr(rsp->rda, cpu); 3799 (void)rcu_print_task_exp_stall(rnp);
3498 3800 mask = 1;
3499 if (rdp->exp_done) 3801 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
3500 continue; 3802 struct rcu_data *rdp;
3501 pr_cont(" %d", cpu); 3803
3804 if (!(rnp->expmask & mask))
3805 continue;
3806 rdp = per_cpu_ptr(rsp->rda, cpu);
3807 pr_cont(" %d-%c%c%c", cpu,
3808 "O."[cpu_online(cpu)],
3809 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
3810 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
3811 }
3812 mask <<= 1;
3502 } 3813 }
3503 pr_cont(" } %lu jiffies s: %lu\n", 3814 pr_cont(" } %lu jiffies s: %lu\n",
3504 jiffies - jiffies_start, rsp->expedited_sequence); 3815 jiffies - jiffies_start, rsp->expedited_sequence);
3505 for_each_online_cpu(cpu) { 3816 rcu_for_each_leaf_node(rsp, rnp) {
3506 rdp = per_cpu_ptr(rsp->rda, cpu); 3817 mask = 1;
3507 3818 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
3508 if (rdp->exp_done) 3819 if (!(rnp->expmask & mask))
3509 continue; 3820 continue;
3510 dump_cpu_task(cpu); 3821 dump_cpu_task(cpu);
3822 }
3511 } 3823 }
3512 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3; 3824 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
3513 } 3825 }
@@ -3531,7 +3843,6 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3531 */ 3843 */
3532void synchronize_sched_expedited(void) 3844void synchronize_sched_expedited(void)
3533{ 3845{
3534 int cpu;
3535 unsigned long s; 3846 unsigned long s;
3536 struct rcu_node *rnp; 3847 struct rcu_node *rnp;
3537 struct rcu_state *rsp = &rcu_sched_state; 3848 struct rcu_state *rsp = &rcu_sched_state;
@@ -3539,48 +3850,16 @@ void synchronize_sched_expedited(void)
3539 /* Take a snapshot of the sequence number. */ 3850 /* Take a snapshot of the sequence number. */
3540 s = rcu_exp_gp_seq_snap(rsp); 3851 s = rcu_exp_gp_seq_snap(rsp);
3541 3852
3542 if (!try_get_online_cpus()) {
3543 /* CPU hotplug operation in flight, fall back to normal GP. */
3544 wait_rcu_gp(call_rcu_sched);
3545 atomic_long_inc(&rsp->expedited_normal);
3546 return;
3547 }
3548 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
3549
3550 rnp = exp_funnel_lock(rsp, s); 3853 rnp = exp_funnel_lock(rsp, s);
3551 if (rnp == NULL) { 3854 if (rnp == NULL)
3552 put_online_cpus();
3553 return; /* Someone else did our work for us. */ 3855 return; /* Someone else did our work for us. */
3554 }
3555 3856
3556 rcu_exp_gp_seq_start(rsp); 3857 rcu_exp_gp_seq_start(rsp);
3557 3858 sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
3558 /* Stop each CPU that is online, non-idle, and not us. */ 3859 synchronize_sched_expedited_wait(rsp);
3559 init_waitqueue_head(&rsp->expedited_wq);
3560 atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
3561 for_each_online_cpu(cpu) {
3562 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3563 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
3564
3565 rdp->exp_done = false;
3566
3567 /* Skip our CPU and any idle CPUs. */
3568 if (raw_smp_processor_id() == cpu ||
3569 !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
3570 continue;
3571 atomic_inc(&rsp->expedited_need_qs);
3572 stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
3573 rdp, &rdp->exp_stop_work);
3574 }
3575
3576 /* Remove extra count and, if necessary, wait for CPUs to stop. */
3577 if (!atomic_dec_and_test(&rsp->expedited_need_qs))
3578 synchronize_sched_expedited_wait(rsp);
3579 3860
3580 rcu_exp_gp_seq_end(rsp); 3861 rcu_exp_gp_seq_end(rsp);
3581 mutex_unlock(&rnp->exp_funnel_mutex); 3862 mutex_unlock(&rnp->exp_funnel_mutex);
3582
3583 put_online_cpus();
3584} 3863}
3585EXPORT_SYMBOL_GPL(synchronize_sched_expedited); 3864EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
3586 3865
@@ -3606,11 +3885,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3606 3885
3607 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3886 /* Is the RCU core waiting for a quiescent state from this CPU? */
3608 if (rcu_scheduler_fully_active && 3887 if (rcu_scheduler_fully_active &&
3609 rdp->qs_pending && !rdp->passed_quiesce && 3888 rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
3610 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) { 3889 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
3611 rdp->n_rp_qs_pending++; 3890 rdp->n_rp_core_needs_qs++;
3612 } else if (rdp->qs_pending && 3891 } else if (rdp->core_needs_qs &&
3613 (rdp->passed_quiesce || 3892 (!rdp->cpu_no_qs.b.norm ||
3614 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) { 3893 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
3615 rdp->n_rp_report_qs++; 3894 rdp->n_rp_report_qs++;
3616 return 1; 3895 return 1;
@@ -3868,7 +4147,6 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3868static void __init 4147static void __init
3869rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) 4148rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3870{ 4149{
3871 static struct lock_class_key rcu_exp_sched_rdp_class;
3872 unsigned long flags; 4150 unsigned long flags;
3873 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 4151 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3874 struct rcu_node *rnp = rcu_get_root(rsp); 4152 struct rcu_node *rnp = rcu_get_root(rsp);
@@ -3884,10 +4162,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3884 mutex_init(&rdp->exp_funnel_mutex); 4162 mutex_init(&rdp->exp_funnel_mutex);
3885 rcu_boot_init_nocb_percpu_data(rdp); 4163 rcu_boot_init_nocb_percpu_data(rdp);
3886 raw_spin_unlock_irqrestore(&rnp->lock, flags); 4164 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3887 if (rsp == &rcu_sched_state)
3888 lockdep_set_class_and_name(&rdp->exp_funnel_mutex,
3889 &rcu_exp_sched_rdp_class,
3890 "rcu_data_exp_sched");
3891} 4165}
3892 4166
3893/* 4167/*
@@ -3906,7 +4180,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3906 4180
3907 /* Set up local state, ensuring consistent view of global state. */ 4181 /* Set up local state, ensuring consistent view of global state. */
3908 raw_spin_lock_irqsave(&rnp->lock, flags); 4182 raw_spin_lock_irqsave(&rnp->lock, flags);
3909 rdp->beenonline = 1; /* We have now been online. */
3910 rdp->qlen_last_fqs_check = 0; 4183 rdp->qlen_last_fqs_check = 0;
3911 rdp->n_force_qs_snap = rsp->n_force_qs; 4184 rdp->n_force_qs_snap = rsp->n_force_qs;
3912 rdp->blimit = blimit; 4185 rdp->blimit = blimit;
@@ -3928,11 +4201,15 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3928 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 4201 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
3929 smp_mb__after_unlock_lock(); 4202 smp_mb__after_unlock_lock();
3930 rnp->qsmaskinitnext |= mask; 4203 rnp->qsmaskinitnext |= mask;
4204 rnp->expmaskinitnext |= mask;
4205 if (!rdp->beenonline)
4206 WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
4207 rdp->beenonline = true; /* We have now been online. */
3931 rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */ 4208 rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
3932 rdp->completed = rnp->completed; 4209 rdp->completed = rnp->completed;
3933 rdp->passed_quiesce = false; 4210 rdp->cpu_no_qs.b.norm = true;
3934 rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu); 4211 rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
3935 rdp->qs_pending = false; 4212 rdp->core_needs_qs = false;
3936 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); 4213 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3937 raw_spin_unlock_irqrestore(&rnp->lock, flags); 4214 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3938} 4215}
@@ -3965,6 +4242,7 @@ int rcu_cpu_notify(struct notifier_block *self,
3965 break; 4242 break;
3966 case CPU_ONLINE: 4243 case CPU_ONLINE:
3967 case CPU_DOWN_FAILED: 4244 case CPU_DOWN_FAILED:
4245 sync_sched_exp_online_cleanup(cpu);
3968 rcu_boost_kthread_setaffinity(rnp, -1); 4246 rcu_boost_kthread_setaffinity(rnp, -1);
3969 break; 4247 break;
3970 case CPU_DOWN_PREPARE: 4248 case CPU_DOWN_PREPARE:
@@ -3976,6 +4254,12 @@ int rcu_cpu_notify(struct notifier_block *self,
3976 rcu_cleanup_dying_cpu(rsp); 4254 rcu_cleanup_dying_cpu(rsp);
3977 break; 4255 break;
3978 case CPU_DYING_IDLE: 4256 case CPU_DYING_IDLE:
4257 /* QS for any half-done expedited RCU-sched GP. */
4258 preempt_disable();
4259 rcu_report_exp_rdp(&rcu_sched_state,
4260 this_cpu_ptr(rcu_sched_state.rda), true);
4261 preempt_enable();
4262
3979 for_each_rcu_flavor(rsp) { 4263 for_each_rcu_flavor(rsp) {
3980 rcu_cleanup_dying_idle_cpu(cpu, rsp); 4264 rcu_cleanup_dying_idle_cpu(cpu, rsp);
3981 } 4265 }
@@ -4107,7 +4391,6 @@ static void __init rcu_init_one(struct rcu_state *rsp,
4107 static const char * const buf[] = RCU_NODE_NAME_INIT; 4391 static const char * const buf[] = RCU_NODE_NAME_INIT;
4108 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4392 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4109 static const char * const exp[] = RCU_EXP_NAME_INIT; 4393 static const char * const exp[] = RCU_EXP_NAME_INIT;
4110 static const char * const exp_sched[] = RCU_EXP_SCHED_NAME_INIT;
4111 static u8 fl_mask = 0x1; 4394 static u8 fl_mask = 0x1;
4112 4395
4113 int levelcnt[RCU_NUM_LVLS]; /* # nodes in each level. */ 4396 int levelcnt[RCU_NUM_LVLS]; /* # nodes in each level. */
@@ -4167,18 +4450,13 @@ static void __init rcu_init_one(struct rcu_state *rsp,
4167 INIT_LIST_HEAD(&rnp->blkd_tasks); 4450 INIT_LIST_HEAD(&rnp->blkd_tasks);
4168 rcu_init_one_nocb(rnp); 4451 rcu_init_one_nocb(rnp);
4169 mutex_init(&rnp->exp_funnel_mutex); 4452 mutex_init(&rnp->exp_funnel_mutex);
4170 if (rsp == &rcu_sched_state) 4453 lockdep_set_class_and_name(&rnp->exp_funnel_mutex,
4171 lockdep_set_class_and_name( 4454 &rcu_exp_class[i], exp[i]);
4172 &rnp->exp_funnel_mutex,
4173 &rcu_exp_sched_class[i], exp_sched[i]);
4174 else
4175 lockdep_set_class_and_name(
4176 &rnp->exp_funnel_mutex,
4177 &rcu_exp_class[i], exp[i]);
4178 } 4455 }
4179 } 4456 }
4180 4457
4181 init_waitqueue_head(&rsp->gp_wq); 4458 init_waitqueue_head(&rsp->gp_wq);
4459 init_waitqueue_head(&rsp->expedited_wq);
4182 rnp = rsp->level[rcu_num_lvls - 1]; 4460 rnp = rsp->level[rcu_num_lvls - 1];
4183 for_each_possible_cpu(i) { 4461 for_each_possible_cpu(i) {
4184 while (i > rnp->grphi) 4462 while (i > rnp->grphi)
@@ -4221,13 +4499,12 @@ static void __init rcu_init_geometry(void)
4221 rcu_fanout_leaf, nr_cpu_ids); 4499 rcu_fanout_leaf, nr_cpu_ids);
4222 4500
4223 /* 4501 /*
4224 * The boot-time rcu_fanout_leaf parameter is only permitted 4502 * The boot-time rcu_fanout_leaf parameter must be at least two
4225 * to increase the leaf-level fanout, not decrease it. Of course, 4503 * and cannot exceed the number of bits in the rcu_node masks.
4226 * the leaf-level fanout cannot exceed the number of bits in 4504 * Complain and fall back to the compile-time values if this
4227 * the rcu_node masks. Complain and fall back to the compile- 4505 * limit is exceeded.
4228 * time values if these limits are exceeded.
4229 */ 4506 */
4230 if (rcu_fanout_leaf < RCU_FANOUT_LEAF || 4507 if (rcu_fanout_leaf < 2 ||
4231 rcu_fanout_leaf > sizeof(unsigned long) * 8) { 4508 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4232 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4509 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4233 WARN_ON(1); 4510 WARN_ON(1);
@@ -4244,10 +4521,13 @@ static void __init rcu_init_geometry(void)
4244 4521
4245 /* 4522 /*
4246 * The tree must be able to accommodate the configured number of CPUs. 4523 * The tree must be able to accommodate the configured number of CPUs.
4247 * If this limit is exceeded than we have a serious problem elsewhere. 4524 * If this limit is exceeded, fall back to the compile-time values.
4248 */ 4525 */
4249 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) 4526 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4250 panic("rcu_init_geometry: rcu_capacity[] is too small"); 4527 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4528 WARN_ON(1);
4529 return;
4530 }
4251 4531
4252 /* Calculate the number of levels in the tree. */ 4532 /* Calculate the number of levels in the tree. */
4253 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { 4533 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
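A stand-alone sketch of the geometry calculation touched above may help: rcu_capacity[i] is the number of CPUs a tree of i+1 levels can cover, and the final loop picks the smallest depth that fits nr_cpu_ids. The fanout constants below are illustrative placeholders, not the kernel's configured values.

#include <stdio.h>

#define NUM_LVLS 4	/* illustrative, stands in for RCU_NUM_LVLS */

int main(void)
{
	unsigned long fanout_leaf = 16, fanout = 64, nr_cpu_ids = 4096;
	unsigned long rcu_capacity[NUM_LVLS];
	int i, levels;

	/* Capacity of a tree with i+1 levels: leaf fanout, times interior fanout per extra level. */
	rcu_capacity[0] = fanout_leaf;
	for (i = 1; i < NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * fanout;

	/* Mirrors the fallback check above: too many CPUs means give up on these parameters. */
	if (nr_cpu_ids > rcu_capacity[NUM_LVLS - 1]) {
		printf("geometry cannot cover %lu CPUs, fall back\n", nr_cpu_ids);
		return 0;
	}

	/* Smallest number of levels whose capacity covers nr_cpu_ids. */
	for (levels = 0; nr_cpu_ids > rcu_capacity[levels]; levels++)
		;
	printf("%lu CPUs need a %d-level rcu_node tree\n", nr_cpu_ids, levels + 1);
	return 0;
}

With these placeholder values the capacities are 16, 1024 and 65536 CPUs, so 4096 CPUs end up with a three-level tree.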
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 2e991f8361e4..9fb4e238d4dc 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -70,8 +70,6 @@
70# define RCU_NODE_NAME_INIT { "rcu_node_0" } 70# define RCU_NODE_NAME_INIT { "rcu_node_0" }
71# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" } 71# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" }
72# define RCU_EXP_NAME_INIT { "rcu_node_exp_0" } 72# define RCU_EXP_NAME_INIT { "rcu_node_exp_0" }
73# define RCU_EXP_SCHED_NAME_INIT \
74 { "rcu_node_exp_sched_0" }
75#elif NR_CPUS <= RCU_FANOUT_2 73#elif NR_CPUS <= RCU_FANOUT_2
76# define RCU_NUM_LVLS 2 74# define RCU_NUM_LVLS 2
77# define NUM_RCU_LVL_0 1 75# define NUM_RCU_LVL_0 1
@@ -81,8 +79,6 @@
81# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" } 79# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
82# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" } 80# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" }
83# define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1" } 81# define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1" }
84# define RCU_EXP_SCHED_NAME_INIT \
85 { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1" }
86#elif NR_CPUS <= RCU_FANOUT_3 82#elif NR_CPUS <= RCU_FANOUT_3
87# define RCU_NUM_LVLS 3 83# define RCU_NUM_LVLS 3
88# define NUM_RCU_LVL_0 1 84# define NUM_RCU_LVL_0 1
@@ -93,8 +89,6 @@
93# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" } 89# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
94# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" } 90# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
95# define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" } 91# define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
96# define RCU_EXP_SCHED_NAME_INIT \
97 { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2" }
98#elif NR_CPUS <= RCU_FANOUT_4 92#elif NR_CPUS <= RCU_FANOUT_4
99# define RCU_NUM_LVLS 4 93# define RCU_NUM_LVLS 4
100# define NUM_RCU_LVL_0 1 94# define NUM_RCU_LVL_0 1
@@ -106,8 +100,6 @@
106# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" } 100# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
107# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" } 101# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
108# define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" } 102# define RCU_EXP_NAME_INIT { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
109# define RCU_EXP_SCHED_NAME_INIT \
110 { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2", "rcu_node_exp_sched_3" }
111#else 103#else
112# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" 104# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
113#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ 105#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
@@ -171,16 +163,21 @@ struct rcu_node {
171 /* an rcu_data structure, otherwise, each */ 163 /* an rcu_data structure, otherwise, each */
172 /* bit corresponds to a child rcu_node */ 164 /* bit corresponds to a child rcu_node */
173 /* structure. */ 165 /* structure. */
174 unsigned long expmask; /* Groups that have ->blkd_tasks */
175 /* elements that need to drain to allow the */
176 /* current expedited grace period to */
177 /* complete (only for PREEMPT_RCU). */
178 unsigned long qsmaskinit; 166 unsigned long qsmaskinit;
179 /* Per-GP initial value for qsmask & expmask. */ 167 /* Per-GP initial value for qsmask. */
180 /* Initialized from ->qsmaskinitnext at the */ 168 /* Initialized from ->qsmaskinitnext at the */
181 /* beginning of each grace period. */ 169 /* beginning of each grace period. */
182 unsigned long qsmaskinitnext; 170 unsigned long qsmaskinitnext;
183 /* Online CPUs for next grace period. */ 171 /* Online CPUs for next grace period. */
172 unsigned long expmask; /* CPUs or groups that need to check in */
173 /* to allow the current expedited GP */
174 /* to complete. */
175 unsigned long expmaskinit;
176 /* Per-GP initial values for expmask. */
177 /* Initialized from ->expmaskinitnext at the */
178 /* beginning of each expedited GP. */
179 unsigned long expmaskinitnext;
180 /* Online CPUs for next expedited GP. */
184 unsigned long grpmask; /* Mask to apply to parent qsmask. */ 181 unsigned long grpmask; /* Mask to apply to parent qsmask. */
185 /* Only one bit will be set in this mask. */ 182 /* Only one bit will be set in this mask. */
186 int grplo; /* lowest-numbered CPU or group here. */ 183 int grplo; /* lowest-numbered CPU or group here. */
@@ -281,6 +278,18 @@ struct rcu_node {
281 for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \ 278 for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
282 (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++) 279 (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
283 280
281/*
282 * Union to allow "aggregate OR" operation on the need for a quiescent
283 * state by the normal and expedited grace periods.
284 */
285union rcu_noqs {
286 struct {
287 u8 norm;
288 u8 exp;
289 } b; /* Bits. */
290 u16 s; /* Set of bits, aggregate OR here. */
291};
292
284/* Index values for nxttail array in struct rcu_data. */ 293/* Index values for nxttail array in struct rcu_data. */
285#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ 294#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
286#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ 295#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
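The union rcu_noqs added a few lines up lets the hot path ask "does any grace period still need a quiescent state from this CPU?" with a single load of ->cpu_no_qs.s, while the slower paths set or clear the .b.norm and .b.exp bytes individually. A minimal user-space sketch of that aggregate-OR idea (the type is mirrored informally here, not taken verbatim from the kernel):

#include <stdint.h>
#include <stdio.h>

union noqs_sketch {
	struct {
		uint8_t norm;	/* normal GP still needs a QS from this CPU */
		uint8_t exp;	/* expedited GP still needs a QS from this CPU */
	} b;			/* individual flags */
	uint16_t s;		/* aggregate OR of both, read in one go */
};

int main(void)
{
	union noqs_sketch q = { .s = 0 };

	q.b.norm = 1;				/* normal GP outstanding */
	if (q.s)				/* one test covers both flavors */
		printf("QS still needed (s=0x%x)\n", (unsigned)q.s);

	q.b.norm = 0;
	printf("all quiescent: %s\n", q.s == 0 ? "yes" : "no");
	return 0;
}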
@@ -297,8 +306,8 @@ struct rcu_data {
297 /* is aware of having started. */ 306 /* is aware of having started. */
298 unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */ 307 unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
299 /* for rcu_all_qs() invocations. */ 308 /* for rcu_all_qs() invocations. */
300 bool passed_quiesce; /* User-mode/idle loop etc. */ 309 union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
301 bool qs_pending; /* Core waits for quiesc state. */ 310 bool core_needs_qs; /* Core waits for quiesc state. */
302 bool beenonline; /* CPU online at least once. */ 311 bool beenonline; /* CPU online at least once. */
303 bool gpwrap; /* Possible gpnum/completed wrap. */ 312 bool gpwrap; /* Possible gpnum/completed wrap. */
304 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ 313 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
@@ -307,9 +316,6 @@ struct rcu_data {
307 /* ticks this CPU has handled */ 316 /* ticks this CPU has handled */
308 /* during and after the last grace */ 317 /* during and after the last grace */
309 /* period it is aware of. */ 318 /* period it is aware of. */
310 struct cpu_stop_work exp_stop_work;
311 /* Expedited grace-period control */
312 /* for CPU stopping. */
313 319
314 /* 2) batch handling */ 320 /* 2) batch handling */
315 /* 321 /*
@@ -363,7 +369,7 @@ struct rcu_data {
363 369
364 /* 5) __rcu_pending() statistics. */ 370 /* 5) __rcu_pending() statistics. */
365 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ 371 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
366 unsigned long n_rp_qs_pending; 372 unsigned long n_rp_core_needs_qs;
367 unsigned long n_rp_report_qs; 373 unsigned long n_rp_report_qs;
368 unsigned long n_rp_cb_ready; 374 unsigned long n_rp_cb_ready;
369 unsigned long n_rp_cpu_needs_gp; 375 unsigned long n_rp_cpu_needs_gp;
@@ -378,7 +384,6 @@ struct rcu_data {
378 struct rcu_head oom_head; 384 struct rcu_head oom_head;
379#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 385#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
380 struct mutex exp_funnel_mutex; 386 struct mutex exp_funnel_mutex;
381 bool exp_done; /* Expedited QS for this CPU? */
382 387
383 /* 7) Callback offloading. */ 388 /* 7) Callback offloading. */
384#ifdef CONFIG_RCU_NOCB_CPU 389#ifdef CONFIG_RCU_NOCB_CPU
@@ -412,13 +417,6 @@ struct rcu_data {
412 struct rcu_state *rsp; 417 struct rcu_state *rsp;
413}; 418};
414 419
415/* Values for fqs_state field in struct rcu_state. */
416#define RCU_GP_IDLE 0 /* No grace period in progress. */
417#define RCU_GP_INIT 1 /* Grace period being initialized. */
418#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
419#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
420#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
421
422/* Values for nocb_defer_wakeup field in struct rcu_data. */ 420/* Values for nocb_defer_wakeup field in struct rcu_data. */
423#define RCU_NOGP_WAKE_NOT 0 421#define RCU_NOGP_WAKE_NOT 0
424#define RCU_NOGP_WAKE 1 422#define RCU_NOGP_WAKE 1
@@ -464,14 +462,13 @@ struct rcu_state {
464 /* shut bogus gcc warning) */ 462 /* shut bogus gcc warning) */
465 u8 flavor_mask; /* bit in flavor mask. */ 463 u8 flavor_mask; /* bit in flavor mask. */
466 struct rcu_data __percpu *rda; /* Pointer to per-CPU rcu_data. */ 464 struct rcu_data __percpu *rda; /* Pointer to per-CPU rcu_data. */
467 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */ 465 call_rcu_func_t call; /* call_rcu() flavor. */
468 void (*func)(struct rcu_head *head)); 466 int ncpus; /* # CPUs seen so far. */
469 467
470 /* The following fields are guarded by the root rcu_node's lock. */ 468 /* The following fields are guarded by the root rcu_node's lock. */
471 469
472 u8 fqs_state ____cacheline_internodealigned_in_smp; 470 u8 boost ____cacheline_internodealigned_in_smp;
473 /* Force QS state. */ 471 /* Subject to priority boost. */
474 u8 boost; /* Subject to priority boost. */
475 unsigned long gpnum; /* Current gp number. */ 472 unsigned long gpnum; /* Current gp number. */
476 unsigned long completed; /* # of last completed gp. */ 473 unsigned long completed; /* # of last completed gp. */
477 struct task_struct *gp_kthread; /* Task for grace periods. */ 474 struct task_struct *gp_kthread; /* Task for grace periods. */
@@ -508,6 +505,7 @@ struct rcu_state {
508 atomic_long_t expedited_normal; /* # fallbacks to normal. */ 505 atomic_long_t expedited_normal; /* # fallbacks to normal. */
509 atomic_t expedited_need_qs; /* # CPUs left to check in. */ 506 atomic_t expedited_need_qs; /* # CPUs left to check in. */
510 wait_queue_head_t expedited_wq; /* Wait for check-ins. */ 507 wait_queue_head_t expedited_wq; /* Wait for check-ins. */
508 int ncpus_snap; /* # CPUs seen last time. */
511 509
512 unsigned long jiffies_force_qs; /* Time at which to invoke */ 510 unsigned long jiffies_force_qs; /* Time at which to invoke */
513 /* force_quiescent_state(). */ 511 /* force_quiescent_state(). */
@@ -538,8 +536,8 @@ struct rcu_state {
538#define RCU_GP_FLAG_INIT 0x1 /* Need grace-period initialization. */ 536#define RCU_GP_FLAG_INIT 0x1 /* Need grace-period initialization. */
539#define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */ 537#define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */
540 538
541/* Values for rcu_state structure's gp_flags field. */ 539/* Values for rcu_state structure's gp_state field. */
542#define RCU_GP_WAIT_INIT 0 /* Initial state. */ 540#define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */
543#define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */ 541#define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */
544#define RCU_GP_DONE_GPS 2 /* Wait done for grace-period start. */ 542#define RCU_GP_DONE_GPS 2 /* Wait done for grace-period start. */
545#define RCU_GP_WAIT_FQS 3 /* Wait for force-quiescent-state time. */ 543#define RCU_GP_WAIT_FQS 3 /* Wait for force-quiescent-state time. */
@@ -582,9 +580,10 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
582#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 580#endif /* #ifdef CONFIG_HOTPLUG_CPU */
583static void rcu_print_detail_task_stall(struct rcu_state *rsp); 581static void rcu_print_detail_task_stall(struct rcu_state *rsp);
584static int rcu_print_task_stall(struct rcu_node *rnp); 582static int rcu_print_task_stall(struct rcu_node *rnp);
583static int rcu_print_task_exp_stall(struct rcu_node *rnp);
585static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 584static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
586static void rcu_preempt_check_callbacks(void); 585static void rcu_preempt_check_callbacks(void);
587void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 586void call_rcu(struct rcu_head *head, rcu_callback_t func);
588static void __init __rcu_init_preempt(void); 587static void __init __rcu_init_preempt(void);
589static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 588static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
590static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 589static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b2bf3963a0ae..630c19772630 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -101,7 +101,6 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
101static struct rcu_state *const rcu_state_p = &rcu_preempt_state; 101static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
102static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data; 102static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
103 103
104static int rcu_preempted_readers_exp(struct rcu_node *rnp);
105static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, 104static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
106 bool wake); 105 bool wake);
107 106
@@ -114,6 +113,147 @@ static void __init rcu_bootup_announce(void)
114 rcu_bootup_announce_oddness(); 113 rcu_bootup_announce_oddness();
115} 114}
116 115
116/* Flags for rcu_preempt_ctxt_queue() decision table. */
117#define RCU_GP_TASKS 0x8
118#define RCU_EXP_TASKS 0x4
119#define RCU_GP_BLKD 0x2
120#define RCU_EXP_BLKD 0x1
121
122/*
123 * Queues a task preempted within an RCU-preempt read-side critical
124 * section into the appropriate location within the ->blkd_tasks list,
125 * depending on the states of any ongoing normal and expedited grace
126 * periods. The ->gp_tasks pointer indicates which element the normal
127 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
128 * indicates which element the expedited grace period is waiting on (again,
129 * NULL if none). If a grace period is waiting on a given element in the
130 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
131 * adding a task to the tail of the list blocks any grace period that is
132 * already waiting on one of the elements. In contrast, adding a task
133 * to the head of the list won't block any grace period that is already
134 * waiting on one of the elements.
135 *
136 * This queuing is imprecise, and can sometimes make an ongoing grace
137 * period wait for a task that is not strictly speaking blocking it.
138 * Given the choice, we needlessly block a normal grace period rather than
139 * blocking an expedited grace period.
140 *
141 * Note that an endless sequence of expedited grace periods still cannot
142 * indefinitely postpone a normal grace period. Eventually, all of the
143 * fixed number of preempted tasks blocking the normal grace period that are
144 * not also blocking the expedited grace period will resume and complete
145 * their RCU read-side critical sections. At that point, the ->gp_tasks
146 * pointer will equal the ->exp_tasks pointer, at which point the end of
147 * the corresponding expedited grace period will also be the end of the
148 * normal grace period.
149 */
150static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
151 unsigned long flags) __releases(rnp->lock)
152{
153 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
154 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
155 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
156 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
157 struct task_struct *t = current;
158
159 /*
160 * Decide where to queue the newly blocked task. In theory,
161 * this could be an if-statement. In practice, when I tried
162 * that, it was quite messy.
163 */
164 switch (blkd_state) {
165 case 0:
166 case RCU_EXP_TASKS:
167 case RCU_EXP_TASKS + RCU_GP_BLKD:
168 case RCU_GP_TASKS:
169 case RCU_GP_TASKS + RCU_EXP_TASKS:
170
171 /*
172 * Blocking neither GP, or first task blocking the normal
173 * GP but not blocking the already-waiting expedited GP.
174 * Queue at the head of the list to avoid unnecessarily
175 * blocking the already-waiting GPs.
176 */
177 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
178 break;
179
180 case RCU_EXP_BLKD:
181 case RCU_GP_BLKD:
182 case RCU_GP_BLKD + RCU_EXP_BLKD:
183 case RCU_GP_TASKS + RCU_EXP_BLKD:
184 case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
185 case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
186
187 /*
188 * First task arriving that blocks either GP, or first task
189 * arriving that blocks the expedited GP (with the normal
190 * GP already waiting), or a task arriving that blocks
191 * both GPs with both GPs already waiting. Queue at the
192 * tail of the list to avoid any GP waiting on any of the
193 * already queued tasks that are not blocking it.
194 */
195 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
196 break;
197
198 case RCU_EXP_TASKS + RCU_EXP_BLKD:
199 case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
200 case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:
201
202 /*
203 * Second or subsequent task blocking the expedited GP.
204 * The task either does not block the normal GP, or is the
205 * first task blocking the normal GP. Queue just after
206 * the first task blocking the expedited GP.
207 */
208 list_add(&t->rcu_node_entry, rnp->exp_tasks);
209 break;
210
211 case RCU_GP_TASKS + RCU_GP_BLKD:
212 case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
213
214 /*
215 * Second or subsequent task blocking the normal GP.
216 * The task does not block the expedited GP. Queue just
217 * after the first task blocking the normal GP.
218 */
219 list_add(&t->rcu_node_entry, rnp->gp_tasks);
220 break;
221
222 default:
223
224 /* Yet another exercise in excessive paranoia. */
225 WARN_ON_ONCE(1);
226 break;
227 }
228
229 /*
230 * We have now queued the task. If it was the first one to
231 * block either grace period, update the ->gp_tasks and/or
232 * ->exp_tasks pointers, respectively, to reference the newly
233 * blocked tasks.
234 */
235 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
236 rnp->gp_tasks = &t->rcu_node_entry;
237 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
238 rnp->exp_tasks = &t->rcu_node_entry;
239 raw_spin_unlock(&rnp->lock);
240
241 /*
242 * Report the quiescent state for the expedited GP. This expedited
243 * GP should not be able to end until we report, so there should be
244 * no need to check for a subsequent expedited GP. (Though we are
245 * still in a quiescent state in any case.)
246 */
247 if (blkd_state & RCU_EXP_BLKD &&
248 t->rcu_read_unlock_special.b.exp_need_qs) {
249 t->rcu_read_unlock_special.b.exp_need_qs = false;
250 rcu_report_exp_rdp(rdp->rsp, rdp, true);
251 } else {
252 WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
253 }
254 local_irq_restore(flags);
255}
256
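A compact way to read the queueing decision table above: the four flag bits describe whether each grace-period flavor is already waiting on queued tasks and whether the newly preempted task blocks it, and the switch maps each combination to a list position. The sketch below (user-space, simplified; only the flag names are borrowed from the patch) reproduces that mapping, with the kernel's explicitly listed tail cases folded into the default arm:

#include <stdio.h>

#define GP_TASKS	0x8	/* normal GP already waiting on a queued task */
#define EXP_TASKS	0x4	/* expedited GP already waiting on a queued task */
#define GP_BLKD		0x2	/* this task blocks the normal GP */
#define EXP_BLKD	0x1	/* this task blocks the expedited GP */

static const char *queue_position(int blkd_state)
{
	switch (blkd_state) {
	/* Blocking neither already-waiting GP: queue at the head. */
	case 0:
	case EXP_TASKS:
	case EXP_TASKS + GP_BLKD:
	case GP_TASKS:
	case GP_TASKS + EXP_TASKS:
		return "head of ->blkd_tasks";
	/* Second or later task blocking the expedited GP. */
	case EXP_TASKS + EXP_BLKD:
	case EXP_TASKS + GP_BLKD + EXP_BLKD:
	case GP_TASKS + EXP_TASKS + EXP_BLKD:
		return "just after ->exp_tasks";
	/* Second or later task blocking only the normal GP. */
	case GP_TASKS + GP_BLKD:
	case GP_TASKS + EXP_TASKS + GP_BLKD:
		return "just after ->gp_tasks";
	/* First task to block a GP that is not yet waiting: tail (the kernel
	 * enumerates these six cases explicitly and WARNs on anything else). */
	default:
		return "tail of ->blkd_tasks";
	}
}

int main(void)
{
	printf("%s\n", queue_position(GP_TASKS + GP_BLKD));	/* just after ->gp_tasks */
	printf("%s\n", queue_position(EXP_BLKD));		/* tail of ->blkd_tasks */
	return 0;
}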
117/* 257/*
118 * Record a preemptible-RCU quiescent state for the specified CPU. Note 258 * Record a preemptible-RCU quiescent state for the specified CPU. Note
119 * that this just means that the task currently running on the CPU is 259 * that this just means that the task currently running on the CPU is
@@ -125,11 +265,11 @@ static void __init rcu_bootup_announce(void)
125 */ 265 */
126static void rcu_preempt_qs(void) 266static void rcu_preempt_qs(void)
127{ 267{
128 if (!__this_cpu_read(rcu_data_p->passed_quiesce)) { 268 if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
129 trace_rcu_grace_period(TPS("rcu_preempt"), 269 trace_rcu_grace_period(TPS("rcu_preempt"),
130 __this_cpu_read(rcu_data_p->gpnum), 270 __this_cpu_read(rcu_data_p->gpnum),
131 TPS("cpuqs")); 271 TPS("cpuqs"));
132 __this_cpu_write(rcu_data_p->passed_quiesce, 1); 272 __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
133 barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */ 273 barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
134 current->rcu_read_unlock_special.b.need_qs = false; 274 current->rcu_read_unlock_special.b.need_qs = false;
135 } 275 }
@@ -167,42 +307,18 @@ static void rcu_preempt_note_context_switch(void)
167 t->rcu_blocked_node = rnp; 307 t->rcu_blocked_node = rnp;
168 308
169 /* 309 /*
170 * If this CPU has already checked in, then this task 310 * Verify the CPU's sanity, trace the preemption, and
171 * will hold up the next grace period rather than the 311 * then queue the task as required based on the states
172 * current grace period. Queue the task accordingly. 312 * of any ongoing and expedited grace periods.
173 * If the task is queued for the current grace period
174 * (i.e., this CPU has not yet passed through a quiescent
175 * state for the current grace period), then as long
176 * as that task remains queued, the current grace period
177 * cannot end. Note that there is some uncertainty as
178 * to exactly when the current grace period started.
179 * We take a conservative approach, which can result
180 * in unnecessarily waiting on tasks that started very
181 * slightly after the current grace period began. C'est
182 * la vie!!!
183 *
184 * But first, note that the current CPU must still be
185 * on line!
186 */ 313 */
187 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); 314 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
188 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); 315 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
189 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
190 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
191 rnp->gp_tasks = &t->rcu_node_entry;
192 if (IS_ENABLED(CONFIG_RCU_BOOST) &&
193 rnp->boost_tasks != NULL)
194 rnp->boost_tasks = rnp->gp_tasks;
195 } else {
196 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
197 if (rnp->qsmask & rdp->grpmask)
198 rnp->gp_tasks = &t->rcu_node_entry;
199 }
200 trace_rcu_preempt_task(rdp->rsp->name, 316 trace_rcu_preempt_task(rdp->rsp->name,
201 t->pid, 317 t->pid,
202 (rnp->qsmask & rdp->grpmask) 318 (rnp->qsmask & rdp->grpmask)
203 ? rnp->gpnum 319 ? rnp->gpnum
204 : rnp->gpnum + 1); 320 : rnp->gpnum + 1);
205 raw_spin_unlock_irqrestore(&rnp->lock, flags); 321 rcu_preempt_ctxt_queue(rnp, rdp, flags);
206 } else if (t->rcu_read_lock_nesting < 0 && 322 } else if (t->rcu_read_lock_nesting < 0 &&
207 t->rcu_read_unlock_special.s) { 323 t->rcu_read_unlock_special.s) {
208 324
@@ -272,6 +388,7 @@ void rcu_read_unlock_special(struct task_struct *t)
272 unsigned long flags; 388 unsigned long flags;
273 struct list_head *np; 389 struct list_head *np;
274 bool drop_boost_mutex = false; 390 bool drop_boost_mutex = false;
391 struct rcu_data *rdp;
275 struct rcu_node *rnp; 392 struct rcu_node *rnp;
276 union rcu_special special; 393 union rcu_special special;
277 394
@@ -282,8 +399,8 @@ void rcu_read_unlock_special(struct task_struct *t)
282 local_irq_save(flags); 399 local_irq_save(flags);
283 400
284 /* 401 /*
285 * If RCU core is waiting for this CPU to exit critical section, 402 * If RCU core is waiting for this CPU to exit its critical section,
286 * let it know that we have done so. Because irqs are disabled, 403 * report the fact that it has exited. Because irqs are disabled,
287 * t->rcu_read_unlock_special cannot change. 404 * t->rcu_read_unlock_special cannot change.
288 */ 405 */
289 special = t->rcu_read_unlock_special; 406 special = t->rcu_read_unlock_special;
@@ -296,13 +413,32 @@ void rcu_read_unlock_special(struct task_struct *t)
296 } 413 }
297 } 414 }
298 415
416 /*
417 * Respond to a request for an expedited grace period, but only if
418 * we were not preempted, meaning that we were running on the same
419 * CPU throughout. If we were preempted, the exp_need_qs flag
420 * would have been cleared at the time of the first preemption,
421 * and the quiescent state would be reported when we were dequeued.
422 */
423 if (special.b.exp_need_qs) {
424 WARN_ON_ONCE(special.b.blocked);
425 t->rcu_read_unlock_special.b.exp_need_qs = false;
426 rdp = this_cpu_ptr(rcu_state_p->rda);
427 rcu_report_exp_rdp(rcu_state_p, rdp, true);
428 if (!t->rcu_read_unlock_special.s) {
429 local_irq_restore(flags);
430 return;
431 }
432 }
433
299 /* Hardware IRQ handlers cannot block, complain if they get here. */ 434 /* Hardware IRQ handlers cannot block, complain if they get here. */
300 if (in_irq() || in_serving_softirq()) { 435 if (in_irq() || in_serving_softirq()) {
301 lockdep_rcu_suspicious(__FILE__, __LINE__, 436 lockdep_rcu_suspicious(__FILE__, __LINE__,
302 "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); 437 "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
303 pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n", 438 pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
304 t->rcu_read_unlock_special.s, 439 t->rcu_read_unlock_special.s,
305 t->rcu_read_unlock_special.b.blocked, 440 t->rcu_read_unlock_special.b.blocked,
441 t->rcu_read_unlock_special.b.exp_need_qs,
306 t->rcu_read_unlock_special.b.need_qs); 442 t->rcu_read_unlock_special.b.need_qs);
307 local_irq_restore(flags); 443 local_irq_restore(flags);
308 return; 444 return;
@@ -329,7 +465,7 @@ void rcu_read_unlock_special(struct task_struct *t)
329 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 465 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
330 } 466 }
331 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); 467 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
332 empty_exp = !rcu_preempted_readers_exp(rnp); 468 empty_exp = sync_rcu_preempt_exp_done(rnp);
333 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 469 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
334 np = rcu_next_node_entry(t, rnp); 470 np = rcu_next_node_entry(t, rnp);
335 list_del_init(&t->rcu_node_entry); 471 list_del_init(&t->rcu_node_entry);
@@ -353,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
353 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, 489 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
354 * so we must take a snapshot of the expedited state. 490 * so we must take a snapshot of the expedited state.
355 */ 491 */
356 empty_exp_now = !rcu_preempted_readers_exp(rnp); 492 empty_exp_now = sync_rcu_preempt_exp_done(rnp);
357 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { 493 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
358 trace_rcu_quiescent_state_report(TPS("preempt_rcu"), 494 trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
359 rnp->gpnum, 495 rnp->gpnum,
@@ -450,6 +586,27 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
450} 586}
451 587
452/* 588/*
589 * Scan the current list of tasks blocked within RCU read-side critical
590 * sections, printing out the tid of each that is blocking the current
591 * expedited grace period.
592 */
593static int rcu_print_task_exp_stall(struct rcu_node *rnp)
594{
595 struct task_struct *t;
596 int ndetected = 0;
597
598 if (!rnp->exp_tasks)
599 return 0;
600 t = list_entry(rnp->exp_tasks->prev,
601 struct task_struct, rcu_node_entry);
602 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
603 pr_cont(" P%d", t->pid);
604 ndetected++;
605 }
606 return ndetected;
607}
608
609/*
453 * Check that the list of blocked tasks for the newly completed grace 610 * Check that the list of blocked tasks for the newly completed grace
454 * period is in fact empty. It is a serious bug to complete a grace 611 * period is in fact empty. It is a serious bug to complete a grace
455 * period that still has RCU readers blocked! This function must be 612 * period that still has RCU readers blocked! This function must be
@@ -483,8 +640,8 @@ static void rcu_preempt_check_callbacks(void)
483 return; 640 return;
484 } 641 }
485 if (t->rcu_read_lock_nesting > 0 && 642 if (t->rcu_read_lock_nesting > 0 &&
486 __this_cpu_read(rcu_data_p->qs_pending) && 643 __this_cpu_read(rcu_data_p->core_needs_qs) &&
487 !__this_cpu_read(rcu_data_p->passed_quiesce)) 644 __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
488 t->rcu_read_unlock_special.b.need_qs = true; 645 t->rcu_read_unlock_special.b.need_qs = true;
489} 646}
490 647
@@ -500,7 +657,7 @@ static void rcu_preempt_do_callbacks(void)
500/* 657/*
501 * Queue a preemptible-RCU callback for invocation after a grace period. 658 * Queue a preemptible-RCU callback for invocation after a grace period.
502 */ 659 */
503void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 660void call_rcu(struct rcu_head *head, rcu_callback_t func)
504{ 661{
505 __call_rcu(head, func, rcu_state_p, -1, 0); 662 __call_rcu(head, func, rcu_state_p, -1, 0);
506} 663}
@@ -535,155 +692,41 @@ void synchronize_rcu(void)
535} 692}
536EXPORT_SYMBOL_GPL(synchronize_rcu); 693EXPORT_SYMBOL_GPL(synchronize_rcu);
537 694
538static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
539
540/*
541 * Return non-zero if there are any tasks in RCU read-side critical
542 * sections blocking the current preemptible-RCU expedited grace period.
543 * If there is no preemptible-RCU expedited grace period currently in
544 * progress, returns zero unconditionally.
545 */
546static int rcu_preempted_readers_exp(struct rcu_node *rnp)
547{
548 return rnp->exp_tasks != NULL;
549}
550
551/*
552 * return non-zero if there is no RCU expedited grace period in progress
553 * for the specified rcu_node structure, in other words, if all CPUs and
554 * tasks covered by the specified rcu_node structure have done their bit
555 * for the current expedited grace period. Works only for preemptible
556 * RCU -- other RCU implementation use other means.
557 *
558 * Caller must hold the root rcu_node's exp_funnel_mutex.
559 */
560static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
561{
562 return !rcu_preempted_readers_exp(rnp) &&
563 READ_ONCE(rnp->expmask) == 0;
564}
565
566/*
567 * Report the exit from RCU read-side critical section for the last task
568 * that queued itself during or before the current expedited preemptible-RCU
569 * grace period. This event is reported either to the rcu_node structure on
570 * which the task was queued or to one of that rcu_node structure's ancestors,
571 * recursively up the tree. (Calm down, calm down, we do the recursion
572 * iteratively!)
573 *
574 * Caller must hold the root rcu_node's exp_funnel_mutex.
575 */
576static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
577 bool wake)
578{
579 unsigned long flags;
580 unsigned long mask;
581
582 raw_spin_lock_irqsave(&rnp->lock, flags);
583 smp_mb__after_unlock_lock();
584 for (;;) {
585 if (!sync_rcu_preempt_exp_done(rnp)) {
586 raw_spin_unlock_irqrestore(&rnp->lock, flags);
587 break;
588 }
589 if (rnp->parent == NULL) {
590 raw_spin_unlock_irqrestore(&rnp->lock, flags);
591 if (wake) {
592 smp_mb(); /* EGP done before wake_up(). */
593 wake_up(&sync_rcu_preempt_exp_wq);
594 }
595 break;
596 }
597 mask = rnp->grpmask;
598 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
599 rnp = rnp->parent;
600 raw_spin_lock(&rnp->lock); /* irqs already disabled */
601 smp_mb__after_unlock_lock();
602 rnp->expmask &= ~mask;
603 }
604}
605
606/* 695/*
607 * Snapshot the tasks blocking the newly started preemptible-RCU expedited 696 * Remote handler for smp_call_function_single(). If there is an
608 * grace period for the specified rcu_node structure, phase 1. If there 697 * RCU read-side critical section in effect, request that the
609 * are such tasks, set the ->expmask bits up the rcu_node tree and also 698 * next rcu_read_unlock() record the quiescent state up the
610 * set the ->expmask bits on the leaf rcu_node structures to tell phase 2 699 * ->expmask fields in the rcu_node tree. Otherwise, immediately
611 * that work is needed here. 700 * report the quiescent state.
612 *
613 * Caller must hold the root rcu_node's exp_funnel_mutex.
614 */ 701 */
615static void 702static void sync_rcu_exp_handler(void *info)
616sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
617{ 703{
618 unsigned long flags; 704 struct rcu_data *rdp;
619 unsigned long mask; 705 struct rcu_state *rsp = info;
620 struct rcu_node *rnp_up; 706 struct task_struct *t = current;
621
622 raw_spin_lock_irqsave(&rnp->lock, flags);
623 smp_mb__after_unlock_lock();
624 WARN_ON_ONCE(rnp->expmask);
625 WARN_ON_ONCE(rnp->exp_tasks);
626 if (!rcu_preempt_has_tasks(rnp)) {
627 /* No blocked tasks, nothing to do. */
628 raw_spin_unlock_irqrestore(&rnp->lock, flags);
629 return;
630 }
631 /* Call for Phase 2 and propagate ->expmask bits up the tree. */
632 rnp->expmask = 1;
633 rnp_up = rnp;
634 while (rnp_up->parent) {
635 mask = rnp_up->grpmask;
636 rnp_up = rnp_up->parent;
637 if (rnp_up->expmask & mask)
638 break;
639 raw_spin_lock(&rnp_up->lock); /* irqs already off */
640 smp_mb__after_unlock_lock();
641 rnp_up->expmask |= mask;
642 raw_spin_unlock(&rnp_up->lock); /* irqs still off */
643 }
644 raw_spin_unlock_irqrestore(&rnp->lock, flags);
645}
646
647/*
648 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
649 * grace period for the specified rcu_node structure, phase 2. If the
650 * leaf rcu_node structure has its ->expmask field set, check for tasks.
651 * If there are some, clear ->expmask and set ->exp_tasks accordingly,
652 * then initiate RCU priority boosting. Otherwise, clear ->expmask and
653 * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
654 * enabling rcu_read_unlock_special() to do the bit-clearing.
655 *
656 * Caller must hold the root rcu_node's exp_funnel_mutex.
657 */
658static void
659sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
660{
661 unsigned long flags;
662
663 raw_spin_lock_irqsave(&rnp->lock, flags);
664 smp_mb__after_unlock_lock();
665 if (!rnp->expmask) {
666 /* Phase 1 didn't do anything, so Phase 2 doesn't either. */
667 raw_spin_unlock_irqrestore(&rnp->lock, flags);
668 return;
669 }
670
671 /* Phase 1 is over. */
672 rnp->expmask = 0;
673 707
674 /* 708 /*
675 * If there are still blocked tasks, set up ->exp_tasks so that 709 * Within an RCU read-side critical section, request that the next
676 * rcu_read_unlock_special() will wake us and then boost them. 710 * rcu_read_unlock() report. Unless this RCU read-side critical
711 * section has already blocked, in which case it is already set
712 * up for the expedited grace period to wait on it.
677 */ 713 */
678 if (rcu_preempt_has_tasks(rnp)) { 714 if (t->rcu_read_lock_nesting > 0 &&
679 rnp->exp_tasks = rnp->blkd_tasks.next; 715 !t->rcu_read_unlock_special.b.blocked) {
680 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ 716 t->rcu_read_unlock_special.b.exp_need_qs = true;
681 return; 717 return;
682 } 718 }
683 719
684 /* No longer any blocked tasks, so undo bit setting. */ 720 /*
685 raw_spin_unlock_irqrestore(&rnp->lock, flags); 721 * We are either exiting an RCU read-side critical section (negative
686 rcu_report_exp_rnp(rsp, rnp, false); 722 * values of t->rcu_read_lock_nesting) or are not in one at all
723 * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU
724 * read-side critical section that blocked before this expedited
725 * grace period started. Either way, we can immediately report
726 * the quiescent state.
727 */
728 rdp = this_cpu_ptr(rsp->rda);
729 rcu_report_exp_rdp(rsp, rdp, true);
687} 730}
688 731
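The new sync_rcu_exp_handler() boils down to one decision: inside a not-yet-blocked read-side critical section, defer the report to the matching rcu_read_unlock(); in every other case, report the expedited quiescent state immediately. A stand-alone sketch of that decision (the struct and its fields are simplified placeholders, not the kernel's task_struct layout):

#include <stdbool.h>
#include <stdio.h>

struct task_sketch {
	int read_lock_nesting;	/* > 0: inside an rcu_read_lock() section */
	bool blocked;		/* already preempted within that section */
	bool exp_need_qs;	/* ask rcu_read_unlock() to report later */
};

static void exp_handler_sketch(struct task_sketch *t)
{
	if (t->read_lock_nesting > 0 && !t->blocked) {
		t->exp_need_qs = true;	/* defer to rcu_read_unlock() */
		return;
	}
	printf("report expedited QS immediately\n");
}

int main(void)
{
	struct task_sketch not_reading = { 0, false, false };
	struct task_sketch reader      = { 1, false, false };

	exp_handler_sketch(&not_reading);	/* immediate report */
	exp_handler_sketch(&reader);		/* deferred */
	printf("reader deferred: %s\n", reader.exp_need_qs ? "yes" : "no");
	return 0;
}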
689/** 732/**
@@ -713,24 +756,12 @@ void synchronize_rcu_expedited(void)
713 756
714 rcu_exp_gp_seq_start(rsp); 757 rcu_exp_gp_seq_start(rsp);
715 758
716 /* force all RCU readers onto ->blkd_tasks lists. */ 759 /* Initialize the rcu_node tree in preparation for the wait. */
717 synchronize_sched_expedited(); 760 sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
718
719 /*
720 * Snapshot current state of ->blkd_tasks lists into ->expmask.
721 * Phase 1 sets bits and phase 2 permits rcu_read_unlock_special()
722 * to start clearing them. Doing this in one phase leads to
723 * strange races between setting and clearing bits, so just say "no"!
724 */
725 rcu_for_each_leaf_node(rsp, rnp)
726 sync_rcu_preempt_exp_init1(rsp, rnp);
727 rcu_for_each_leaf_node(rsp, rnp)
728 sync_rcu_preempt_exp_init2(rsp, rnp);
729 761
730 /* Wait for snapshotted ->blkd_tasks lists to drain. */ 762 /* Wait for snapshotted ->blkd_tasks lists to drain. */
731 rnp = rcu_get_root(rsp); 763 rnp = rcu_get_root(rsp);
732 wait_event(sync_rcu_preempt_exp_wq, 764 synchronize_sched_expedited_wait(rsp);
733 sync_rcu_preempt_exp_done(rnp));
734 765
735 /* Clean up and exit. */ 766 /* Clean up and exit. */
736 rcu_exp_gp_seq_end(rsp); 767 rcu_exp_gp_seq_end(rsp);
@@ -835,6 +866,16 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
835} 866}
836 867
837/* 868/*
869 * Because preemptible RCU does not exist, we never have to check for
870 * tasks blocked within RCU read-side critical sections that are
871 * blocking the current expedited grace period.
872 */
873static int rcu_print_task_exp_stall(struct rcu_node *rnp)
874{
875 return 0;
876}
877
878/*
838 * Because there is no preemptible RCU, there can be no readers blocked, 879 * Because there is no preemptible RCU, there can be no readers blocked,
839 * so there is no need to check for blocked tasks. So check only for 880 * so there is no need to check for blocked tasks. So check only for
840 * bogus qsmask values. 881 * bogus qsmask values.
@@ -1702,8 +1743,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1702 ticks_value = rsp->gpnum - rdp->gpnum; 1743 ticks_value = rsp->gpnum - rdp->gpnum;
1703 } 1744 }
1704 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1745 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1705 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n", 1746 pr_err("\t%d-%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
1706 cpu, ticks_value, ticks_title, 1747 cpu,
1748 "O."[!!cpu_online(cpu)],
1749 "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
1750 "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
1751 ticks_value, ticks_title,
1707 atomic_read(&rdtp->dynticks) & 0xfff, 1752 atomic_read(&rdtp->dynticks) & 0xfff,
1708 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1753 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1709 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1754 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
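The extra per-CPU flag characters added above rely on the "X."[!!cond] idiom: !!cond collapses to 0 or 1, so character 0 of the two-character string is printed when the condition is false and '.' when it is true. A tiny demonstration (plain user-space C, variable name made up for the example):

#include <stdio.h>

int main(void)
{
	int cpu_online = 0;	/* pretend the CPU is offline */

	/* Index 0 selects 'O' because the condition is false; '.' would mean online. */
	printf("flag: %c\n", "O."[!!cpu_online]);
	return 0;
}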
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 6fc4c5ff3bb5..ef7093cc9b5c 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -117,13 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
117 117
118 if (!rdp->beenonline) 118 if (!rdp->beenonline)
119 return; 119 return;
120 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d", 120 seq_printf(m, "%3d%cc=%ld g=%ld cnq=%d/%d:%d",
121 rdp->cpu, 121 rdp->cpu,
122 cpu_is_offline(rdp->cpu) ? '!' : ' ', 122 cpu_is_offline(rdp->cpu) ? '!' : ' ',
123 ulong2long(rdp->completed), ulong2long(rdp->gpnum), 123 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
124 rdp->passed_quiesce, 124 rdp->cpu_no_qs.b.norm,
125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu), 125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
126 rdp->qs_pending); 126 rdp->core_needs_qs);
127 seq_printf(m, " dt=%d/%llx/%d df=%lu", 127 seq_printf(m, " dt=%d/%llx/%d df=%lu",
128 atomic_read(&rdp->dynticks->dynticks), 128 atomic_read(&rdp->dynticks->dynticks),
129 rdp->dynticks->dynticks_nesting, 129 rdp->dynticks->dynticks_nesting,
@@ -268,7 +268,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
268 gpnum = rsp->gpnum; 268 gpnum = rsp->gpnum;
269 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x ", 269 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x ",
270 ulong2long(rsp->completed), ulong2long(gpnum), 270 ulong2long(rsp->completed), ulong2long(gpnum),
271 rsp->fqs_state, 271 rsp->gp_state,
272 (long)(rsp->jiffies_force_qs - jiffies), 272 (long)(rsp->jiffies_force_qs - jiffies),
273 (int)(jiffies & 0xffff)); 273 (int)(jiffies & 0xffff));
274 seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", 274 seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
@@ -361,7 +361,7 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
361 cpu_is_offline(rdp->cpu) ? '!' : ' ', 361 cpu_is_offline(rdp->cpu) ? '!' : ' ',
362 rdp->n_rcu_pending); 362 rdp->n_rcu_pending);
363 seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ", 363 seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
364 rdp->n_rp_qs_pending, 364 rdp->n_rp_core_needs_qs,
365 rdp->n_rp_report_qs, 365 rdp->n_rp_report_qs,
366 rdp->n_rp_cb_ready, 366 rdp->n_rp_cb_ready,
367 rdp->n_rp_cpu_needs_gp); 367 rdp->n_rp_cpu_needs_gp);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 7a0b3bc7c5ed..5f748c5a40f0 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -534,7 +534,7 @@ static void rcu_spawn_tasks_kthread(void);
534 * Post an RCU-tasks callback. First call must be from process context 534 * Post an RCU-tasks callback. First call must be from process context
535 * after the scheduler is fully operational. 535 * after the scheduler is fully operational.
536 */ 536 */
537void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp)) 537void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
538{ 538{
539 unsigned long flags; 539 unsigned long flags;
540 bool needwake; 540 bool needwake;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 10a8faa1b0d4..aa5973220ad2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -817,7 +817,7 @@ static void set_load_weight(struct task_struct *p)
817 /* 817 /*
818 * SCHED_IDLE tasks get minimal weight: 818 * SCHED_IDLE tasks get minimal weight:
819 */ 819 */
820 if (p->policy == SCHED_IDLE) { 820 if (idle_policy(p->policy)) {
821 load->weight = scale_load(WEIGHT_IDLEPRIO); 821 load->weight = scale_load(WEIGHT_IDLEPRIO);
822 load->inv_weight = WMULT_IDLEPRIO; 822 load->inv_weight = WMULT_IDLEPRIO;
823 return; 823 return;
@@ -827,17 +827,19 @@ static void set_load_weight(struct task_struct *p)
827 load->inv_weight = prio_to_wmult[prio]; 827 load->inv_weight = prio_to_wmult[prio];
828} 828}
829 829
830static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 830static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
831{ 831{
832 update_rq_clock(rq); 832 update_rq_clock(rq);
833 sched_info_queued(rq, p); 833 if (!(flags & ENQUEUE_RESTORE))
834 sched_info_queued(rq, p);
834 p->sched_class->enqueue_task(rq, p, flags); 835 p->sched_class->enqueue_task(rq, p, flags);
835} 836}
836 837
837static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 838static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
838{ 839{
839 update_rq_clock(rq); 840 update_rq_clock(rq);
840 sched_info_dequeued(rq, p); 841 if (!(flags & DEQUEUE_SAVE))
842 sched_info_dequeued(rq, p);
841 p->sched_class->dequeue_task(rq, p, flags); 843 p->sched_class->dequeue_task(rq, p, flags);
842} 844}
843 845
@@ -1178,7 +1180,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1178 * holding rq->lock. 1180 * holding rq->lock.
1179 */ 1181 */
1180 lockdep_assert_held(&rq->lock); 1182 lockdep_assert_held(&rq->lock);
1181 dequeue_task(rq, p, 0); 1183 dequeue_task(rq, p, DEQUEUE_SAVE);
1182 } 1184 }
1183 if (running) 1185 if (running)
1184 put_prev_task(rq, p); 1186 put_prev_task(rq, p);
@@ -1188,7 +1190,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1188 if (running) 1190 if (running)
1189 p->sched_class->set_curr_task(rq); 1191 p->sched_class->set_curr_task(rq);
1190 if (queued) 1192 if (queued)
1191 enqueue_task(rq, p, 0); 1193 enqueue_task(rq, p, ENQUEUE_RESTORE);
1192} 1194}
1193 1195
1194/* 1196/*
@@ -1292,7 +1294,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1292 1294
1293 if (task_cpu(p) != new_cpu) { 1295 if (task_cpu(p) != new_cpu) {
1294 if (p->sched_class->migrate_task_rq) 1296 if (p->sched_class->migrate_task_rq)
1295 p->sched_class->migrate_task_rq(p, new_cpu); 1297 p->sched_class->migrate_task_rq(p);
1296 p->se.nr_migrations++; 1298 p->se.nr_migrations++;
1297 perf_event_task_migrate(p); 1299 perf_event_task_migrate(p);
1298 } 1300 }
@@ -1333,12 +1335,16 @@ static int migrate_swap_stop(void *data)
1333 struct rq *src_rq, *dst_rq; 1335 struct rq *src_rq, *dst_rq;
1334 int ret = -EAGAIN; 1336 int ret = -EAGAIN;
1335 1337
1338 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
1339 return -EAGAIN;
1340
1336 src_rq = cpu_rq(arg->src_cpu); 1341 src_rq = cpu_rq(arg->src_cpu);
1337 dst_rq = cpu_rq(arg->dst_cpu); 1342 dst_rq = cpu_rq(arg->dst_cpu);
1338 1343
1339 double_raw_lock(&arg->src_task->pi_lock, 1344 double_raw_lock(&arg->src_task->pi_lock,
1340 &arg->dst_task->pi_lock); 1345 &arg->dst_task->pi_lock);
1341 double_rq_lock(src_rq, dst_rq); 1346 double_rq_lock(src_rq, dst_rq);
1347
1342 if (task_cpu(arg->dst_task) != arg->dst_cpu) 1348 if (task_cpu(arg->dst_task) != arg->dst_cpu)
1343 goto unlock; 1349 goto unlock;
1344 1350
@@ -1574,13 +1580,15 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
1574 goto out; 1580 goto out;
1575 } 1581 }
1576 1582
1583 /* No more Mr. Nice Guy. */
1577 switch (state) { 1584 switch (state) {
1578 case cpuset: 1585 case cpuset:
1579 /* No more Mr. Nice Guy. */ 1586 if (IS_ENABLED(CONFIG_CPUSETS)) {
1580 cpuset_cpus_allowed_fallback(p); 1587 cpuset_cpus_allowed_fallback(p);
1581 state = possible; 1588 state = possible;
1582 break; 1589 break;
1583 1590 }
1591 /* fall-through */
1584 case possible: 1592 case possible:
1585 do_set_cpus_allowed(p, cpu_possible_mask); 1593 do_set_cpus_allowed(p, cpu_possible_mask);
1586 state = fail; 1594 state = fail;
@@ -1692,7 +1700,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1692#endif /* CONFIG_SCHEDSTATS */ 1700#endif /* CONFIG_SCHEDSTATS */
1693} 1701}
1694 1702
1695static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) 1703static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1696{ 1704{
1697 activate_task(rq, p, en_flags); 1705 activate_task(rq, p, en_flags);
1698 p->on_rq = TASK_ON_RQ_QUEUED; 1706 p->on_rq = TASK_ON_RQ_QUEUED;
@@ -2114,23 +2122,17 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
2114#endif /* CONFIG_NUMA_BALANCING */ 2122#endif /* CONFIG_NUMA_BALANCING */
2115} 2123}
2116 2124
2125DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
2126
2117#ifdef CONFIG_NUMA_BALANCING 2127#ifdef CONFIG_NUMA_BALANCING
2118#ifdef CONFIG_SCHED_DEBUG 2128
2119void set_numabalancing_state(bool enabled) 2129void set_numabalancing_state(bool enabled)
2120{ 2130{
2121 if (enabled) 2131 if (enabled)
2122 sched_feat_set("NUMA"); 2132 static_branch_enable(&sched_numa_balancing);
2123 else 2133 else
2124 sched_feat_set("NO_NUMA"); 2134 static_branch_disable(&sched_numa_balancing);
2125}
2126#else
2127__read_mostly bool numabalancing_enabled;
2128
2129void set_numabalancing_state(bool enabled)
2130{
2131 numabalancing_enabled = enabled;
2132} 2135}
2133#endif /* CONFIG_SCHED_DEBUG */
2134 2136
2135#ifdef CONFIG_PROC_SYSCTL 2137#ifdef CONFIG_PROC_SYSCTL
2136int sysctl_numa_balancing(struct ctl_table *table, int write, 2138int sysctl_numa_balancing(struct ctl_table *table, int write,
@@ -2138,7 +2140,7 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
2138{ 2140{
2139 struct ctl_table t; 2141 struct ctl_table t;
2140 int err; 2142 int err;
2141 int state = numabalancing_enabled; 2143 int state = static_branch_likely(&sched_numa_balancing);
2142 2144
2143 if (write && !capable(CAP_SYS_ADMIN)) 2145 if (write && !capable(CAP_SYS_ADMIN))
2144 return -EPERM; 2146 return -EPERM;
@@ -2349,6 +2351,8 @@ void wake_up_new_task(struct task_struct *p)
2349 struct rq *rq; 2351 struct rq *rq;
2350 2352
2351 raw_spin_lock_irqsave(&p->pi_lock, flags); 2353 raw_spin_lock_irqsave(&p->pi_lock, flags);
2354 /* Initialize new task's runnable average */
2355 init_entity_runnable_average(&p->se);
2352#ifdef CONFIG_SMP 2356#ifdef CONFIG_SMP
2353 /* 2357 /*
2354 * Fork balancing, do it here and not earlier because: 2358 * Fork balancing, do it here and not earlier because:
@@ -2358,16 +2362,21 @@ void wake_up_new_task(struct task_struct *p)
2358 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2362 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2359#endif 2363#endif
2360 2364
2361 /* Initialize new task's runnable average */
2362 init_entity_runnable_average(&p->se);
2363 rq = __task_rq_lock(p); 2365 rq = __task_rq_lock(p);
2364 activate_task(rq, p, 0); 2366 activate_task(rq, p, 0);
2365 p->on_rq = TASK_ON_RQ_QUEUED; 2367 p->on_rq = TASK_ON_RQ_QUEUED;
2366 trace_sched_wakeup_new(p); 2368 trace_sched_wakeup_new(p);
2367 check_preempt_curr(rq, p, WF_FORK); 2369 check_preempt_curr(rq, p, WF_FORK);
2368#ifdef CONFIG_SMP 2370#ifdef CONFIG_SMP
2369 if (p->sched_class->task_woken) 2371 if (p->sched_class->task_woken) {
2372 /*
2373 * Nothing relies on rq->lock after this, so it's fine to
2374 * drop it.
2375 */
2376 lockdep_unpin_lock(&rq->lock);
2370 p->sched_class->task_woken(rq, p); 2377 p->sched_class->task_woken(rq, p);
2378 lockdep_pin_lock(&rq->lock);
2379 }
2371#endif 2380#endif
2372 task_rq_unlock(rq, p, &flags); 2381 task_rq_unlock(rq, p, &flags);
2373} 2382}
@@ -2476,7 +2485,6 @@ static inline void
2476prepare_task_switch(struct rq *rq, struct task_struct *prev, 2485prepare_task_switch(struct rq *rq, struct task_struct *prev,
2477 struct task_struct *next) 2486 struct task_struct *next)
2478{ 2487{
2479 trace_sched_switch(prev, next);
2480 sched_info_switch(rq, prev, next); 2488 sched_info_switch(rq, prev, next);
2481 perf_event_task_sched_out(prev, next); 2489 perf_event_task_sched_out(prev, next);
2482 fire_sched_out_preempt_notifiers(prev, next); 2490 fire_sched_out_preempt_notifiers(prev, next);
@@ -2510,6 +2518,22 @@ static struct rq *finish_task_switch(struct task_struct *prev)
2510 struct mm_struct *mm = rq->prev_mm; 2518 struct mm_struct *mm = rq->prev_mm;
2511 long prev_state; 2519 long prev_state;
2512 2520
2521 /*
2522 * The previous task will have left us with a preempt_count of 2
2523 * because it left us after:
2524 *
2525 * schedule()
2526 * preempt_disable(); // 1
2527 * __schedule()
2528 * raw_spin_lock_irq(&rq->lock) // 2
2529 *
2530 * Also, see FORK_PREEMPT_COUNT.
2531 */
2532 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2533 "corrupted preempt_count: %s/%d/0x%x\n",
2534 current->comm, current->pid, preempt_count()))
2535 preempt_count_set(FORK_PREEMPT_COUNT);
2536
2513 rq->prev_mm = NULL; 2537 rq->prev_mm = NULL;
2514 2538
2515 /* 2539 /*
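
The new WARN_ONCE documents an invariant rather than changing behaviour: the incoming task inherits one preempt_count increment from schedule()'s preempt_disable() and one from __schedule() taking rq->lock. A toy model of that bookkeeping, with a simplified one-per-level counter standing in for the real PREEMPT_DISABLE_OFFSET arithmetic:

#include <assert.h>
#include <stdio.h>

#define PREEMPT_DISABLE_OFFSET_SKETCH 1	/* simplified, not the kernel value */

static int preempt_count_sketch;

static void preempt_disable_sketch(void) { preempt_count_sketch += PREEMPT_DISABLE_OFFSET_SKETCH; }
static void rq_lock_sketch(void)         { preempt_count_sketch += PREEMPT_DISABLE_OFFSET_SKETCH; }

int main(void)
{
	preempt_disable_sketch();	/* schedule(): preempt_disable()            -> 1 */
	rq_lock_sketch();		/* __schedule(): raw_spin_lock_irq(&rq->lock) -> 2 */

	/* What finish_task_switch() expects to observe on the next task: */
	assert(preempt_count_sketch == 2 * PREEMPT_DISABLE_OFFSET_SKETCH);
	printf("preempt_count = %d\n", preempt_count_sketch);
	return 0;
}
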
@@ -2594,8 +2618,15 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
2594{ 2618{
2595 struct rq *rq; 2619 struct rq *rq;
2596 2620
2597 /* finish_task_switch() drops rq->lock and enables preemtion */ 2621 /*
2598 preempt_disable(); 2622 * New tasks start with FORK_PREEMPT_COUNT, see there and
2623 * finish_task_switch() for details.
2624 *
2625 * finish_task_switch() will drop rq->lock() and lower preempt_count
2626 * and the preempt_enable() will end up enabling preemption (on
2627 * PREEMPT_COUNT kernels).
2628 */
2629
2599 rq = finish_task_switch(prev); 2630 rq = finish_task_switch(prev);
2600 balance_callback(rq); 2631 balance_callback(rq);
2601 preempt_enable(); 2632 preempt_enable();
@@ -2953,15 +2984,13 @@ static noinline void __schedule_bug(struct task_struct *prev)
2953static inline void schedule_debug(struct task_struct *prev) 2984static inline void schedule_debug(struct task_struct *prev)
2954{ 2985{
2955#ifdef CONFIG_SCHED_STACK_END_CHECK 2986#ifdef CONFIG_SCHED_STACK_END_CHECK
2956 BUG_ON(unlikely(task_stack_end_corrupted(prev))); 2987 BUG_ON(task_stack_end_corrupted(prev));
2957#endif 2988#endif
2958 /* 2989
2959 * Test if we are atomic. Since do_exit() needs to call into 2990 if (unlikely(in_atomic_preempt_off())) {
2960 * schedule() atomically, we ignore that path. Otherwise whine
2961 * if we are scheduling when we should not.
2962 */
2963 if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
2964 __schedule_bug(prev); 2991 __schedule_bug(prev);
2992 preempt_count_set(PREEMPT_DISABLED);
2993 }
2965 rcu_sleep_check(); 2994 rcu_sleep_check();
2966 2995
2967 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 2996 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -3047,7 +3076,7 @@ again:
3047 * 3076 *
3048 * WARNING: must be called with preemption disabled! 3077 * WARNING: must be called with preemption disabled!
3049 */ 3078 */
3050static void __sched __schedule(void) 3079static void __sched notrace __schedule(bool preempt)
3051{ 3080{
3052 struct task_struct *prev, *next; 3081 struct task_struct *prev, *next;
3053 unsigned long *switch_count; 3082 unsigned long *switch_count;
@@ -3059,6 +3088,17 @@ static void __sched __schedule(void)
3059 rcu_note_context_switch(); 3088 rcu_note_context_switch();
3060 prev = rq->curr; 3089 prev = rq->curr;
3061 3090
3091 /*
3092 * do_exit() calls schedule() with preemption disabled as an exception;
3093 * however we must fix that up, otherwise the next task will see an
3094 * inconsistent (higher) preempt count.
3095 *
3096 * It also avoids the below schedule_debug() test from complaining
3097 * about this.
3098 */
3099 if (unlikely(prev->state == TASK_DEAD))
3100 preempt_enable_no_resched_notrace();
3101
3062 schedule_debug(prev); 3102 schedule_debug(prev);
3063 3103
3064 if (sched_feat(HRTICK)) 3104 if (sched_feat(HRTICK))
@@ -3076,7 +3116,7 @@ static void __sched __schedule(void)
3076 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ 3116 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3077 3117
3078 switch_count = &prev->nivcsw; 3118 switch_count = &prev->nivcsw;
3079 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 3119 if (!preempt && prev->state) {
3080 if (unlikely(signal_pending_state(prev->state, prev))) { 3120 if (unlikely(signal_pending_state(prev->state, prev))) {
3081 prev->state = TASK_RUNNING; 3121 prev->state = TASK_RUNNING;
3082 } else { 3122 } else {
@@ -3112,6 +3152,7 @@ static void __sched __schedule(void)
3112 rq->curr = next; 3152 rq->curr = next;
3113 ++*switch_count; 3153 ++*switch_count;
3114 3154
3155 trace_sched_switch(preempt, prev, next);
3115 rq = context_switch(rq, prev, next); /* unlocks the rq */ 3156 rq = context_switch(rq, prev, next); /* unlocks the rq */
3116 cpu = cpu_of(rq); 3157 cpu = cpu_of(rq);
3117 } else { 3158 } else {
@@ -3141,7 +3182,7 @@ asmlinkage __visible void __sched schedule(void)
3141 sched_submit_work(tsk); 3182 sched_submit_work(tsk);
3142 do { 3183 do {
3143 preempt_disable(); 3184 preempt_disable();
3144 __schedule(); 3185 __schedule(false);
3145 sched_preempt_enable_no_resched(); 3186 sched_preempt_enable_no_resched();
3146 } while (need_resched()); 3187 } while (need_resched());
3147} 3188}
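
With PREEMPT_ACTIVE gone, callers now tell __schedule() explicitly whether the switch is preemptive, and only a voluntary call may dequeue a non-running task. A standalone sketch of that decision (names, state constants and the enum are illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define TASK_RUNNING_SKETCH       0
#define TASK_INTERRUPTIBLE_SKETCH 1

enum prev_task_action { KEEP_ON_RQ, MAKE_RUNNING, DEACTIVATE };

static enum prev_task_action classify_prev(bool preempt, long state, bool signal_pending)
{
	if (preempt || state == TASK_RUNNING_SKETCH)
		return KEEP_ON_RQ;	/* involuntary switch: task stays queued     */
	if (signal_pending)
		return MAKE_RUNNING;	/* the signal_pending_state() path           */
	return DEACTIVATE;		/* voluntary sleep: deactivate_task() it     */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_prev(true,  TASK_INTERRUPTIBLE_SKETCH, false),	/* KEEP_ON_RQ   */
	       classify_prev(false, TASK_INTERRUPTIBLE_SKETCH, true),	/* MAKE_RUNNING */
	       classify_prev(false, TASK_INTERRUPTIBLE_SKETCH, false));	/* DEACTIVATE   */
	return 0;
}
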
@@ -3181,9 +3222,9 @@ void __sched schedule_preempt_disabled(void)
3181static void __sched notrace preempt_schedule_common(void) 3222static void __sched notrace preempt_schedule_common(void)
3182{ 3223{
3183 do { 3224 do {
3184 preempt_active_enter(); 3225 preempt_disable_notrace();
3185 __schedule(); 3226 __schedule(true);
3186 preempt_active_exit(); 3227 preempt_enable_no_resched_notrace();
3187 3228
3188 /* 3229 /*
3189 * Check again in case we missed a preemption opportunity 3230 * Check again in case we missed a preemption opportunity
@@ -3234,24 +3275,17 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
3234 return; 3275 return;
3235 3276
3236 do { 3277 do {
3237 /* 3278 preempt_disable_notrace();
3238 * Use raw __prempt_count() ops that don't call function.
3239 * We can't call functions before disabling preemption which
3240 * disarm preemption tracing recursions.
3241 */
3242 __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
3243 barrier();
3244 /* 3279 /*
3245 * Needs preempt disabled in case user_exit() is traced 3280 * Needs preempt disabled in case user_exit() is traced
3246 * and the tracer calls preempt_enable_notrace() causing 3281 * and the tracer calls preempt_enable_notrace() causing
3247 * an infinite recursion. 3282 * an infinite recursion.
3248 */ 3283 */
3249 prev_ctx = exception_enter(); 3284 prev_ctx = exception_enter();
3250 __schedule(); 3285 __schedule(true);
3251 exception_exit(prev_ctx); 3286 exception_exit(prev_ctx);
3252 3287
3253 barrier(); 3288 preempt_enable_no_resched_notrace();
3254 __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
3255 } while (need_resched()); 3289 } while (need_resched());
3256} 3290}
3257EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 3291EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
@@ -3274,11 +3308,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
3274 prev_state = exception_enter(); 3308 prev_state = exception_enter();
3275 3309
3276 do { 3310 do {
3277 preempt_active_enter(); 3311 preempt_disable();
3278 local_irq_enable(); 3312 local_irq_enable();
3279 __schedule(); 3313 __schedule(true);
3280 local_irq_disable(); 3314 local_irq_disable();
3281 preempt_active_exit(); 3315 sched_preempt_enable_no_resched();
3282 } while (need_resched()); 3316 } while (need_resched());
3283 3317
3284 exception_exit(prev_state); 3318 exception_exit(prev_state);
@@ -3306,7 +3340,7 @@ EXPORT_SYMBOL(default_wake_function);
3306 */ 3340 */
3307void rt_mutex_setprio(struct task_struct *p, int prio) 3341void rt_mutex_setprio(struct task_struct *p, int prio)
3308{ 3342{
3309 int oldprio, queued, running, enqueue_flag = 0; 3343 int oldprio, queued, running, enqueue_flag = ENQUEUE_RESTORE;
3310 struct rq *rq; 3344 struct rq *rq;
3311 const struct sched_class *prev_class; 3345 const struct sched_class *prev_class;
3312 3346
@@ -3338,7 +3372,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3338 queued = task_on_rq_queued(p); 3372 queued = task_on_rq_queued(p);
3339 running = task_current(rq, p); 3373 running = task_current(rq, p);
3340 if (queued) 3374 if (queued)
3341 dequeue_task(rq, p, 0); 3375 dequeue_task(rq, p, DEQUEUE_SAVE);
3342 if (running) 3376 if (running)
3343 put_prev_task(rq, p); 3377 put_prev_task(rq, p);
3344 3378
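
Throughout this series the bare dequeue_task(rq, p, 0)/enqueue_task(rq, p, 0) pairs around attribute changes become DEQUEUE_SAVE/ENQUEUE_RESTORE, so the classes can tell a save/restore cycle from a real sleep or wakeup. A toy version of the pattern; the flag values and helpers below are made up for illustration, the real definitions live in kernel/sched/sched.h and are not part of this diff:

#include <stdio.h>

#define DEQUEUE_SAVE_SKETCH	0x02	/* assumed value, illustration only */
#define ENQUEUE_RESTORE_SKETCH	0x02
#define ENQUEUE_HEAD_SKETCH	0x08

struct toy_task { int prio; int queued; };

static void dequeue_task_sketch(struct toy_task *p, int flags) { p->queued = 0; (void)flags; }
static void enqueue_task_sketch(struct toy_task *p, int flags) { p->queued = 1; (void)flags; }

static void change_prio_sketch(struct toy_task *p, int prio, int oldprio)
{
	int enqueue_flags = ENQUEUE_RESTORE_SKETCH;

	if (p->queued)
		dequeue_task_sketch(p, DEQUEUE_SAVE_SKETCH);

	p->prio = prio;
	if (oldprio < prio)			/* lower priority (larger number): queue at head */
		enqueue_flags |= ENQUEUE_HEAD_SKETCH;

	enqueue_task_sketch(p, enqueue_flags);
}

int main(void)
{
	struct toy_task t = { .prio = 120, .queued = 1 };

	change_prio_sketch(&t, 130, t.prio);
	printf("prio=%d queued=%d\n", t.prio, t.queued);
	return 0;
}
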
@@ -3356,7 +3390,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3356 if (!dl_prio(p->normal_prio) || 3390 if (!dl_prio(p->normal_prio) ||
3357 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 3391 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3358 p->dl.dl_boosted = 1; 3392 p->dl.dl_boosted = 1;
3359 enqueue_flag = ENQUEUE_REPLENISH; 3393 enqueue_flag |= ENQUEUE_REPLENISH;
3360 } else 3394 } else
3361 p->dl.dl_boosted = 0; 3395 p->dl.dl_boosted = 0;
3362 p->sched_class = &dl_sched_class; 3396 p->sched_class = &dl_sched_class;
@@ -3364,7 +3398,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3364 if (dl_prio(oldprio)) 3398 if (dl_prio(oldprio))
3365 p->dl.dl_boosted = 0; 3399 p->dl.dl_boosted = 0;
3366 if (oldprio < prio) 3400 if (oldprio < prio)
3367 enqueue_flag = ENQUEUE_HEAD; 3401 enqueue_flag |= ENQUEUE_HEAD;
3368 p->sched_class = &rt_sched_class; 3402 p->sched_class = &rt_sched_class;
3369 } else { 3403 } else {
3370 if (dl_prio(oldprio)) 3404 if (dl_prio(oldprio))
@@ -3416,7 +3450,7 @@ void set_user_nice(struct task_struct *p, long nice)
3416 } 3450 }
3417 queued = task_on_rq_queued(p); 3451 queued = task_on_rq_queued(p);
3418 if (queued) 3452 if (queued)
3419 dequeue_task(rq, p, 0); 3453 dequeue_task(rq, p, DEQUEUE_SAVE);
3420 3454
3421 p->static_prio = NICE_TO_PRIO(nice); 3455 p->static_prio = NICE_TO_PRIO(nice);
3422 set_load_weight(p); 3456 set_load_weight(p);
@@ -3425,7 +3459,7 @@ void set_user_nice(struct task_struct *p, long nice)
3425 delta = p->prio - old_prio; 3459 delta = p->prio - old_prio;
3426 3460
3427 if (queued) { 3461 if (queued) {
3428 enqueue_task(rq, p, 0); 3462 enqueue_task(rq, p, ENQUEUE_RESTORE);
3429 /* 3463 /*
3430 * If the task increased its priority or is running and 3464 * If the task increased its priority or is running and
3431 * lowered its priority, then reschedule its CPU: 3465 * lowered its priority, then reschedule its CPU:
@@ -3746,10 +3780,7 @@ recheck:
3746 } else { 3780 } else {
3747 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 3781 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
3748 3782
3749 if (policy != SCHED_DEADLINE && 3783 if (!valid_policy(policy))
3750 policy != SCHED_FIFO && policy != SCHED_RR &&
3751 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3752 policy != SCHED_IDLE)
3753 return -EINVAL; 3784 return -EINVAL;
3754 } 3785 }
3755 3786
@@ -3805,7 +3836,7 @@ recheck:
3805 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3836 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3806 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3837 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3807 */ 3838 */
3808 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { 3839 if (idle_policy(p->policy) && !idle_policy(policy)) {
3809 if (!can_nice(p, task_nice(p))) 3840 if (!can_nice(p, task_nice(p)))
3810 return -EPERM; 3841 return -EPERM;
3811 } 3842 }
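
The open-coded policy comparisons are folded into valid_policy() and idle_policy() helpers whose definitions are not part of this diff. One plausible reading of them, purely as an illustration:

#include <stdbool.h>
#include <stdio.h>

enum { POL_NORMAL, POL_FIFO, POL_RR, POL_BATCH, POL_IDLE, POL_DEADLINE, POL_MAX };

static bool idle_policy_sketch(int policy)
{
	return policy == POL_IDLE;
}

static bool valid_policy_sketch(int policy)
{
	return policy >= 0 && policy < POL_MAX;
}

int main(void)
{
	/* The recheck path rejects unknown policies ... */
	printf("valid(42) = %d\n", valid_policy_sketch(42));
	/* ... and a switch away from SCHED_IDLE still has to pass can_nice(). */
	printf("idle -> normal needs can_nice(): %d\n",
	       idle_policy_sketch(POL_IDLE) && !idle_policy_sketch(POL_NORMAL));
	return 0;
}
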
@@ -3930,7 +3961,7 @@ change:
3930 queued = task_on_rq_queued(p); 3961 queued = task_on_rq_queued(p);
3931 running = task_current(rq, p); 3962 running = task_current(rq, p);
3932 if (queued) 3963 if (queued)
3933 dequeue_task(rq, p, 0); 3964 dequeue_task(rq, p, DEQUEUE_SAVE);
3934 if (running) 3965 if (running)
3935 put_prev_task(rq, p); 3966 put_prev_task(rq, p);
3936 3967
@@ -3940,11 +3971,15 @@ change:
3940 if (running) 3971 if (running)
3941 p->sched_class->set_curr_task(rq); 3972 p->sched_class->set_curr_task(rq);
3942 if (queued) { 3973 if (queued) {
3974 int enqueue_flags = ENQUEUE_RESTORE;
3943 /* 3975 /*
3944 * We enqueue to tail when the priority of a task is 3976 * We enqueue to tail when the priority of a task is
3945 * increased (user space view). 3977 * increased (user space view).
3946 */ 3978 */
3947 enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); 3979 if (oldprio <= p->prio)
3980 enqueue_flags |= ENQUEUE_HEAD;
3981
3982 enqueue_task(rq, p, enqueue_flags);
3948 } 3983 }
3949 3984
3950 check_class_changed(rq, p, prev_class, oldprio); 3985 check_class_changed(rq, p, prev_class, oldprio);
@@ -4022,6 +4057,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4022{ 4057{
4023 return _sched_setscheduler(p, policy, param, false); 4058 return _sched_setscheduler(p, policy, param, false);
4024} 4059}
4060EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
4025 4061
4026static int 4062static int
4027do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4063do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
@@ -5093,7 +5129,7 @@ void sched_setnuma(struct task_struct *p, int nid)
5093 running = task_current(rq, p); 5129 running = task_current(rq, p);
5094 5130
5095 if (queued) 5131 if (queued)
5096 dequeue_task(rq, p, 0); 5132 dequeue_task(rq, p, DEQUEUE_SAVE);
5097 if (running) 5133 if (running)
5098 put_prev_task(rq, p); 5134 put_prev_task(rq, p);
5099 5135
@@ -5102,7 +5138,7 @@ void sched_setnuma(struct task_struct *p, int nid)
5102 if (running) 5138 if (running)
5103 p->sched_class->set_curr_task(rq); 5139 p->sched_class->set_curr_task(rq);
5104 if (queued) 5140 if (queued)
5105 enqueue_task(rq, p, 0); 5141 enqueue_task(rq, p, ENQUEUE_RESTORE);
5106 task_rq_unlock(rq, p, &flags); 5142 task_rq_unlock(rq, p, &flags);
5107} 5143}
5108#endif /* CONFIG_NUMA_BALANCING */ 5144#endif /* CONFIG_NUMA_BALANCING */
@@ -5523,21 +5559,27 @@ static void set_cpu_rq_start_time(void)
5523static int sched_cpu_active(struct notifier_block *nfb, 5559static int sched_cpu_active(struct notifier_block *nfb,
5524 unsigned long action, void *hcpu) 5560 unsigned long action, void *hcpu)
5525{ 5561{
5562 int cpu = (long)hcpu;
5563
5526 switch (action & ~CPU_TASKS_FROZEN) { 5564 switch (action & ~CPU_TASKS_FROZEN) {
5527 case CPU_STARTING: 5565 case CPU_STARTING:
5528 set_cpu_rq_start_time(); 5566 set_cpu_rq_start_time();
5529 return NOTIFY_OK; 5567 return NOTIFY_OK;
5568
5530 case CPU_ONLINE: 5569 case CPU_ONLINE:
5531 /* 5570 /*
5532 * At this point a starting CPU has marked itself as online via 5571 * At this point a starting CPU has marked itself as online via
5533 * set_cpu_online(). But it might not yet have marked itself 5572 * set_cpu_online(). But it might not yet have marked itself
5534 * as active, which is essential from here on. 5573 * as active, which is essential from here on.
5535 *
5536 * Thus, fall-through and help the starting CPU along.
5537 */ 5574 */
5575 set_cpu_active(cpu, true);
5576 stop_machine_unpark(cpu);
5577 return NOTIFY_OK;
5578
5538 case CPU_DOWN_FAILED: 5579 case CPU_DOWN_FAILED:
5539 set_cpu_active((long)hcpu, true); 5580 set_cpu_active(cpu, true);
5540 return NOTIFY_OK; 5581 return NOTIFY_OK;
5582
5541 default: 5583 default:
5542 return NOTIFY_DONE; 5584 return NOTIFY_DONE;
5543 } 5585 }
@@ -6469,7 +6511,8 @@ static struct sched_domain_topology_level default_topology[] = {
6469 { NULL, }, 6511 { NULL, },
6470}; 6512};
6471 6513
6472struct sched_domain_topology_level *sched_domain_topology = default_topology; 6514static struct sched_domain_topology_level *sched_domain_topology =
6515 default_topology;
6473 6516
6474#define for_each_sd_topology(tl) \ 6517#define for_each_sd_topology(tl) \
6475 for (tl = sched_domain_topology; tl->mask; tl++) 6518 for (tl = sched_domain_topology; tl->mask; tl++)
@@ -7238,9 +7281,6 @@ void __init sched_init_smp(void)
7238 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7281 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7239 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7282 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7240 7283
7241 /* nohz_full won't take effect without isolating the cpus. */
7242 tick_nohz_full_add_cpus_to(cpu_isolated_map);
7243
7244 sched_init_numa(); 7284 sched_init_numa();
7245 7285
7246 /* 7286 /*
@@ -7473,7 +7513,7 @@ void __init sched_init(void)
7473#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7513#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7474static inline int preempt_count_equals(int preempt_offset) 7514static inline int preempt_count_equals(int preempt_offset)
7475{ 7515{
7476 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); 7516 int nested = preempt_count() + rcu_preempt_depth();
7477 7517
7478 return (nested == preempt_offset); 7518 return (nested == preempt_offset);
7479} 7519}
@@ -7720,7 +7760,7 @@ void sched_move_task(struct task_struct *tsk)
7720 queued = task_on_rq_queued(tsk); 7760 queued = task_on_rq_queued(tsk);
7721 7761
7722 if (queued) 7762 if (queued)
7723 dequeue_task(rq, tsk, 0); 7763 dequeue_task(rq, tsk, DEQUEUE_SAVE);
7724 if (unlikely(running)) 7764 if (unlikely(running))
7725 put_prev_task(rq, tsk); 7765 put_prev_task(rq, tsk);
7726 7766
@@ -7736,7 +7776,7 @@ void sched_move_task(struct task_struct *tsk)
7736 7776
7737#ifdef CONFIG_FAIR_GROUP_SCHED 7777#ifdef CONFIG_FAIR_GROUP_SCHED
7738 if (tsk->sched_class->task_move_group) 7778 if (tsk->sched_class->task_move_group)
7739 tsk->sched_class->task_move_group(tsk, queued); 7779 tsk->sched_class->task_move_group(tsk);
7740 else 7780 else
7741#endif 7781#endif
7742 set_task_rq(tsk, task_cpu(tsk)); 7782 set_task_rq(tsk, task_cpu(tsk));
@@ -7744,7 +7784,7 @@ void sched_move_task(struct task_struct *tsk)
7744 if (unlikely(running)) 7784 if (unlikely(running))
7745 tsk->sched_class->set_curr_task(rq); 7785 tsk->sched_class->set_curr_task(rq);
7746 if (queued) 7786 if (queued)
7747 enqueue_task(rq, tsk, 0); 7787 enqueue_task(rq, tsk, ENQUEUE_RESTORE);
7748 7788
7749 task_rq_unlock(rq, tsk, &flags); 7789 task_rq_unlock(rq, tsk, &flags);
7750} 7790}
@@ -8208,14 +8248,6 @@ static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
8208 struct cgroup_subsys_state *old_css, 8248 struct cgroup_subsys_state *old_css,
8209 struct task_struct *task) 8249 struct task_struct *task)
8210{ 8250{
8211 /*
8212 * cgroup_exit() is called in the copy_process() failure path.
8213 * Ignore this case since the task hasn't ran yet, this avoids
8214 * trying to poke a half freed task state from generic code.
8215 */
8216 if (!(task->flags & PF_EXITING))
8217 return;
8218
8219 sched_move_task(task); 8251 sched_move_task(task);
8220} 8252}
8221 8253
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index c6acb07466bb..5a75b08cfd85 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -31,11 +31,6 @@ static inline int right_child(int i)
31 return (i << 1) + 2; 31 return (i << 1) + 2;
32} 32}
33 33
34static inline int dl_time_before(u64 a, u64 b)
35{
36 return (s64)(a - b) < 0;
37}
38
39static void cpudl_exchange(struct cpudl *cp, int a, int b) 34static void cpudl_exchange(struct cpudl *cp, int a, int b)
40{ 35{
41 int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu; 36 int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index 1a0a6ef2fbe1..fcbdf83fed7e 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -2,6 +2,7 @@
2#define _LINUX_CPUDL_H 2#define _LINUX_CPUDL_H
3 3
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/sched/deadline.h>
5 6
6#define IDX_INVALID -1 7#define IDX_INVALID -1
7 8
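
The dl_time_before() helper deleted from cpudeadline.c is the generic wraparound-safe comparison now pulled in from <linux/sched/deadline.h>. A standalone demo with the same one-line body, showing why the signed reinterpretation survives u64 wraparound:

#include <stdint.h>
#include <stdio.h>

static inline int dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;	/* same body as the removed helper */
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 10;	/* deadline just before the clock wraps */
	uint64_t wrapped   = 5;			/* deadline just after the wrap         */

	/* A plain "<" would get this wrong; the signed difference does not. */
	printf("%d %d\n", dl_time_before(near_wrap, wrapped),	/* 1: earlier */
			  dl_time_before(wrapped, near_wrap));	/* 0          */
	return 0;
}
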
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fc8f01083527..8b0a15e285f9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -668,8 +668,15 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
668 * Queueing this task back might have overloaded rq, check if we need 668 * Queueing this task back might have overloaded rq, check if we need
669 * to kick someone away. 669 * to kick someone away.
670 */ 670 */
671 if (has_pushable_dl_tasks(rq)) 671 if (has_pushable_dl_tasks(rq)) {
672 /*
673 * Nothing relies on rq->lock after this, so it's safe to drop
674 * rq->lock.
675 */
676 lockdep_unpin_lock(&rq->lock);
672 push_dl_task(rq); 677 push_dl_task(rq);
678 lockdep_pin_lock(&rq->lock);
679 }
673#endif 680#endif
674 681
675unlock: 682unlock:
@@ -1066,8 +1073,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1066 int target = find_later_rq(p); 1073 int target = find_later_rq(p);
1067 1074
1068 if (target != -1 && 1075 if (target != -1 &&
1069 dl_time_before(p->dl.deadline, 1076 (dl_time_before(p->dl.deadline,
1070 cpu_rq(target)->dl.earliest_dl.curr)) 1077 cpu_rq(target)->dl.earliest_dl.curr) ||
1078 (cpu_rq(target)->dl.dl_nr_running == 0)))
1071 cpu = target; 1079 cpu = target;
1072 } 1080 }
1073 rcu_read_unlock(); 1081 rcu_read_unlock();
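
Both deadline.c hunks relax the target test in the same way: a CPU is acceptable either because its DL runqueue is idle (dl_nr_running == 0) or because its earliest deadline is later than ours. A small sketch of that predicate (the struct is a stand-in for the relevant rq fields, not a kernel type):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dl_rq_sketch {
	unsigned int dl_nr_running;
	uint64_t earliest_dl_curr;
};

static inline int dl_time_before_sketch(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

static bool later_rq_suitable(const struct dl_rq_sketch *target, uint64_t my_deadline)
{
	return target->dl_nr_running == 0 ||
	       dl_time_before_sketch(my_deadline, target->earliest_dl_curr);
}

int main(void)
{
	struct dl_rq_sketch idle_rq = { 0, 0 };
	struct dl_rq_sketch busy_rq = { 2, 1000 };

	printf("%d %d\n", later_rq_suitable(&idle_rq, 5000),	/* 1: idle, take it            */
			  later_rq_suitable(&busy_rq, 5000));	/* 0: its deadline is earlier  */
	return 0;
}
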
@@ -1417,7 +1425,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1417 1425
1418 later_rq = cpu_rq(cpu); 1426 later_rq = cpu_rq(cpu);
1419 1427
1420 if (!dl_time_before(task->dl.deadline, 1428 if (later_rq->dl.dl_nr_running &&
1429 !dl_time_before(task->dl.deadline,
1421 later_rq->dl.earliest_dl.curr)) { 1430 later_rq->dl.earliest_dl.curr)) {
1422 /* 1431 /*
1423 * Target rq has tasks of equal or earlier deadline, 1432 * Target rq has tasks of equal or earlier deadline,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e2e3483b1ec..824aa9f501a3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -661,11 +661,12 @@ static unsigned long task_h_load(struct task_struct *p);
661 661
662/* 662/*
663 * We choose a half-life close to 1 scheduling period. 663 * We choose a half-life close to 1 scheduling period.
664 * Note: The tables below are dependent on this value. 664 * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
665 * dependent on this value.
665 */ 666 */
666#define LOAD_AVG_PERIOD 32 667#define LOAD_AVG_PERIOD 32
667#define LOAD_AVG_MAX 47742 /* maximum possible load avg */ 668#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
668#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */ 669#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
669 670
670/* Give new sched_entity start runnable values to heavy its load in infant time */ 671/* Give new sched_entity start runnable values to heavy its load in infant time */
671void init_entity_runnable_average(struct sched_entity *se) 672void init_entity_runnable_average(struct sched_entity *se)
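
The reworded comment points at the fixed-point tables behind these constants. A quick floating-point cross-check of the numbers, which is only a sanity check and not how the kernel computes them: y is chosen so that y^LOAD_AVG_PERIOD = 1/2, and summing 1024*y^n over roughly LOAD_AVG_MAX_N periods lands close to LOAD_AVG_MAX (the integer tables truncate, so the kernel's 47742 is slightly lower):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* half-life of LOAD_AVG_PERIOD = 32 */
	double sum = 0.0;
	int n;

	for (n = 0; n <= 345; n++)		/* ~LOAD_AVG_MAX_N full periods */
		sum += 1024.0 * pow(y, n);

	printf("y^32 = %.3f, sum ~ %.0f (kernel's truncated LOAD_AVG_MAX is 47742)\n",
	       pow(y, 32), sum);
	return 0;
}
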
@@ -682,7 +683,7 @@ void init_entity_runnable_average(struct sched_entity *se)
682 sa->load_avg = scale_load_down(se->load.weight); 683 sa->load_avg = scale_load_down(se->load.weight);
683 sa->load_sum = sa->load_avg * LOAD_AVG_MAX; 684 sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
684 sa->util_avg = scale_load_down(SCHED_LOAD_SCALE); 685 sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
685 sa->util_sum = LOAD_AVG_MAX; 686 sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
686 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ 687 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
687} 688}
688 689
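
The one-character fix above gives util_sum the same relationship to util_avg that load_sum already has to load_avg, namely sum == avg * LOAD_AVG_MAX when fully decayed in. A tiny arithmetic check; 1024 stands in for scale_load_down() of both the weight and SCHED_LOAD_SCALE, matching the 2^10 unit assumption asserted later in this diff:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX_SKETCH 47742

int main(void)
{
	uint64_t load_avg = 1024;				/* scale_load_down(se->load.weight)  */
	uint64_t load_sum = load_avg * LOAD_AVG_MAX_SKETCH;

	uint64_t util_avg = 1024;				/* scale_load_down(SCHED_LOAD_SCALE) */
	uint64_t util_sum = util_avg * LOAD_AVG_MAX_SKETCH;	/* was plain LOAD_AVG_MAX before the fix */

	/* Decaying either sum back down must reproduce its average: */
	assert(load_sum / LOAD_AVG_MAX_SKETCH == load_avg);
	assert(util_sum / LOAD_AVG_MAX_SKETCH == util_avg);

	printf("util_sum=%" PRIu64 " util_avg=%" PRIu64 "\n", util_sum, util_avg);
	return 0;
}
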
@@ -2069,7 +2070,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2069 int local = !!(flags & TNF_FAULT_LOCAL); 2070 int local = !!(flags & TNF_FAULT_LOCAL);
2070 int priv; 2071 int priv;
2071 2072
2072 if (!numabalancing_enabled) 2073 if (!static_branch_likely(&sched_numa_balancing))
2073 return; 2074 return;
2074 2075
2075 /* for example, ksmd faulting in a user's mm */ 2076 /* for example, ksmd faulting in a user's mm */
@@ -2157,7 +2158,7 @@ void task_numa_work(struct callback_head *work)
2157 struct vm_area_struct *vma; 2158 struct vm_area_struct *vma;
2158 unsigned long start, end; 2159 unsigned long start, end;
2159 unsigned long nr_pte_updates = 0; 2160 unsigned long nr_pte_updates = 0;
2160 long pages; 2161 long pages, virtpages;
2161 2162
2162 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work)); 2163 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2163 2164
@@ -2203,9 +2204,11 @@ void task_numa_work(struct callback_head *work)
2203 start = mm->numa_scan_offset; 2204 start = mm->numa_scan_offset;
2204 pages = sysctl_numa_balancing_scan_size; 2205 pages = sysctl_numa_balancing_scan_size;
2205 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2206 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2207 virtpages = pages * 8; /* Scan up to this much virtual space */
2206 if (!pages) 2208 if (!pages)
2207 return; 2209 return;
2208 2210
2211
2209 down_read(&mm->mmap_sem); 2212 down_read(&mm->mmap_sem);
2210 vma = find_vma(mm, start); 2213 vma = find_vma(mm, start);
2211 if (!vma) { 2214 if (!vma) {
@@ -2240,18 +2243,22 @@ void task_numa_work(struct callback_head *work)
2240 start = max(start, vma->vm_start); 2243 start = max(start, vma->vm_start);
2241 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 2244 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2242 end = min(end, vma->vm_end); 2245 end = min(end, vma->vm_end);
2243 nr_pte_updates += change_prot_numa(vma, start, end); 2246 nr_pte_updates = change_prot_numa(vma, start, end);
2244 2247
2245 /* 2248 /*
2246 * Scan sysctl_numa_balancing_scan_size but ensure that 2249 * Try to scan sysctl_numa_balancing_scan_size worth of
2247 * at least one PTE is updated so that unused virtual 2250 * hpages that have at least one present PTE that
2248 * address space is quickly skipped. 2251 * is not already pte-numa. If the VMA contains
2252 * areas that are unused or already full of prot_numa
2253 * PTEs, scan up to virtpages, to skip through those
2254 * areas faster.
2249 */ 2255 */
2250 if (nr_pte_updates) 2256 if (nr_pte_updates)
2251 pages -= (end - start) >> PAGE_SHIFT; 2257 pages -= (end - start) >> PAGE_SHIFT;
2258 virtpages -= (end - start) >> PAGE_SHIFT;
2252 2259
2253 start = end; 2260 start = end;
2254 if (pages <= 0) 2261 if (pages <= 0 || virtpages <= 0)
2255 goto out; 2262 goto out;
2256 2263
2257 cond_resched(); 2264 cond_resched();
@@ -2363,7 +2370,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2363 */ 2370 */
2364 tg_weight = atomic_long_read(&tg->load_avg); 2371 tg_weight = atomic_long_read(&tg->load_avg);
2365 tg_weight -= cfs_rq->tg_load_avg_contrib; 2372 tg_weight -= cfs_rq->tg_load_avg_contrib;
2366 tg_weight += cfs_rq_load_avg(cfs_rq); 2373 tg_weight += cfs_rq->load.weight;
2367 2374
2368 return tg_weight; 2375 return tg_weight;
2369} 2376}
@@ -2373,7 +2380,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2373 long tg_weight, load, shares; 2380 long tg_weight, load, shares;
2374 2381
2375 tg_weight = calc_tg_weight(tg, cfs_rq); 2382 tg_weight = calc_tg_weight(tg, cfs_rq);
2376 load = cfs_rq_load_avg(cfs_rq); 2383 load = cfs_rq->load.weight;
2377 2384
2378 shares = (tg->shares * load); 2385 shares = (tg->shares * load);
2379 if (tg_weight) 2386 if (tg_weight)
@@ -2515,6 +2522,12 @@ static u32 __compute_runnable_contrib(u64 n)
2515 return contrib + runnable_avg_yN_sum[n]; 2522 return contrib + runnable_avg_yN_sum[n];
2516} 2523}
2517 2524
2525#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
2526#error "load tracking assumes 2^10 as unit"
2527#endif
2528
2529#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
2530
2518/* 2531/*
2519 * We can represent the historical contribution to runnable average as the 2532 * We can represent the historical contribution to runnable average as the
2520 * coefficients of a geometric series. To do this we sub-divide our runnable 2533 * coefficients of a geometric series. To do this we sub-divide our runnable
@@ -2547,10 +2560,10 @@ static __always_inline int
2547__update_load_avg(u64 now, int cpu, struct sched_avg *sa, 2560__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2548 unsigned long weight, int running, struct cfs_rq *cfs_rq) 2561 unsigned long weight, int running, struct cfs_rq *cfs_rq)
2549{ 2562{
2550 u64 delta, periods; 2563 u64 delta, scaled_delta, periods;
2551 u32 contrib; 2564 u32 contrib;
2552 int delta_w, decayed = 0; 2565 unsigned int delta_w, scaled_delta_w, decayed = 0;
2553 unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu); 2566 unsigned long scale_freq, scale_cpu;
2554 2567
2555 delta = now - sa->last_update_time; 2568 delta = now - sa->last_update_time;
2556 /* 2569 /*
@@ -2571,6 +2584,9 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2571 return 0; 2584 return 0;
2572 sa->last_update_time = now; 2585 sa->last_update_time = now;
2573 2586
2587 scale_freq = arch_scale_freq_capacity(NULL, cpu);
2588 scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2589
2574 /* delta_w is the amount already accumulated against our next period */ 2590 /* delta_w is the amount already accumulated against our next period */
2575 delta_w = sa->period_contrib; 2591 delta_w = sa->period_contrib;
2576 if (delta + delta_w >= 1024) { 2592 if (delta + delta_w >= 1024) {
@@ -2585,13 +2601,16 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2585 * period and accrue it. 2601 * period and accrue it.
2586 */ 2602 */
2587 delta_w = 1024 - delta_w; 2603 delta_w = 1024 - delta_w;
2604 scaled_delta_w = cap_scale(delta_w, scale_freq);
2588 if (weight) { 2605 if (weight) {
2589 sa->load_sum += weight * delta_w; 2606 sa->load_sum += weight * scaled_delta_w;
2590 if (cfs_rq) 2607 if (cfs_rq) {
2591 cfs_rq->runnable_load_sum += weight * delta_w; 2608 cfs_rq->runnable_load_sum +=
2609 weight * scaled_delta_w;
2610 }
2592 } 2611 }
2593 if (running) 2612 if (running)
2594 sa->util_sum += delta_w * scale_freq >> SCHED_CAPACITY_SHIFT; 2613 sa->util_sum += scaled_delta_w * scale_cpu;
2595 2614
2596 delta -= delta_w; 2615 delta -= delta_w;
2597 2616
@@ -2608,23 +2627,25 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2608 2627
2609 /* Efficiently calculate \sum (1..n_period) 1024*y^i */ 2628 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2610 contrib = __compute_runnable_contrib(periods); 2629 contrib = __compute_runnable_contrib(periods);
2630 contrib = cap_scale(contrib, scale_freq);
2611 if (weight) { 2631 if (weight) {
2612 sa->load_sum += weight * contrib; 2632 sa->load_sum += weight * contrib;
2613 if (cfs_rq) 2633 if (cfs_rq)
2614 cfs_rq->runnable_load_sum += weight * contrib; 2634 cfs_rq->runnable_load_sum += weight * contrib;
2615 } 2635 }
2616 if (running) 2636 if (running)
2617 sa->util_sum += contrib * scale_freq >> SCHED_CAPACITY_SHIFT; 2637 sa->util_sum += contrib * scale_cpu;
2618 } 2638 }
2619 2639
2620 /* Remainder of delta accrued against u_0` */ 2640 /* Remainder of delta accrued against u_0` */
2641 scaled_delta = cap_scale(delta, scale_freq);
2621 if (weight) { 2642 if (weight) {
2622 sa->load_sum += weight * delta; 2643 sa->load_sum += weight * scaled_delta;
2623 if (cfs_rq) 2644 if (cfs_rq)
2624 cfs_rq->runnable_load_sum += weight * delta; 2645 cfs_rq->runnable_load_sum += weight * scaled_delta;
2625 } 2646 }
2626 if (running) 2647 if (running)
2627 sa->util_sum += delta * scale_freq >> SCHED_CAPACITY_SHIFT; 2648 sa->util_sum += scaled_delta * scale_cpu;
2628 2649
2629 sa->period_contrib += delta; 2650 sa->period_contrib += delta;
2630 2651
@@ -2634,7 +2655,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2634 cfs_rq->runnable_load_avg = 2655 cfs_rq->runnable_load_avg =
2635 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX); 2656 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2636 } 2657 }
2637 sa->util_avg = (sa->util_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX; 2658 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2638 } 2659 }
2639 2660
2640 return decayed; 2661 return decayed;
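
The accumulation now multiplies every elapsed chunk by the current frequency capacity (cap_scale()) before adding it to load_sum, and scales the utilization part by the CPU's capacity as well, which is what lets util_avg = util_sum / LOAD_AVG_MAX come out directly in capacity units. A standalone rendering of one such accumulation step, with made-up capacities:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT_SKETCH 10
#define cap_scale_sketch(v, s) ((v) * (s) >> SCHED_CAPACITY_SHIFT_SKETCH)

int main(void)
{
	uint64_t delta      = 1024;	/* one full accounting period                 */
	uint64_t scale_freq = 512;	/* running at half of the maximum frequency   */
	uint64_t scale_cpu  = 768;	/* CPU with 75% of the biggest CPU's capacity */
	uint64_t weight     = 1024;	/* scale_load_down(se->load.weight)           */

	/* mirrors: scaled_delta_w = cap_scale(delta_w, scale_freq) */
	uint64_t scaled_delta = cap_scale_sketch(delta, scale_freq);

	uint64_t load_contrib = weight * scaled_delta;		/* sa->load_sum += ...            */
	uint64_t util_contrib = scaled_delta * scale_cpu;	/* sa->util_sum += ... (no shift) */

	/*
	 * Summed over time and divided by LOAD_AVG_MAX, the util side then
	 * converges to at most scale_cpu, i.e. a value in capacity units.
	 */
	printf("scaled_delta=%llu load+=%llu util+=%llu\n",
	       (unsigned long long)scaled_delta,
	       (unsigned long long)load_contrib,
	       (unsigned long long)util_contrib);
	return 0;
}
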
@@ -2664,20 +2685,20 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2664/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ 2685/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
2665static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 2686static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2666{ 2687{
2667 int decayed;
2668 struct sched_avg *sa = &cfs_rq->avg; 2688 struct sched_avg *sa = &cfs_rq->avg;
2689 int decayed, removed = 0;
2669 2690
2670 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 2691 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2671 long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 2692 long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2672 sa->load_avg = max_t(long, sa->load_avg - r, 0); 2693 sa->load_avg = max_t(long, sa->load_avg - r, 0);
2673 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); 2694 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
2695 removed = 1;
2674 } 2696 }
2675 2697
2676 if (atomic_long_read(&cfs_rq->removed_util_avg)) { 2698 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2677 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); 2699 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
2678 sa->util_avg = max_t(long, sa->util_avg - r, 0); 2700 sa->util_avg = max_t(long, sa->util_avg - r, 0);
2679 sa->util_sum = max_t(s32, sa->util_sum - 2701 sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
2680 ((r * LOAD_AVG_MAX) >> SCHED_LOAD_SHIFT), 0);
2681 } 2702 }
2682 2703
2683 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa, 2704 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
@@ -2688,40 +2709,77 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2688 cfs_rq->load_last_update_time_copy = sa->last_update_time; 2709 cfs_rq->load_last_update_time_copy = sa->last_update_time;
2689#endif 2710#endif
2690 2711
2691 return decayed; 2712 return decayed || removed;
2692} 2713}
2693 2714
2694/* Update task and its cfs_rq load average */ 2715/* Update task and its cfs_rq load average */
2695static inline void update_load_avg(struct sched_entity *se, int update_tg) 2716static inline void update_load_avg(struct sched_entity *se, int update_tg)
2696{ 2717{
2697 struct cfs_rq *cfs_rq = cfs_rq_of(se); 2718 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2698 int cpu = cpu_of(rq_of(cfs_rq));
2699 u64 now = cfs_rq_clock_task(cfs_rq); 2719 u64 now = cfs_rq_clock_task(cfs_rq);
2720 int cpu = cpu_of(rq_of(cfs_rq));
2700 2721
2701 /* 2722 /*
2702 * Track task load average for carrying it to new CPU after migrated, and 2723 * Track task load average for carrying it to new CPU after migrated, and
2703 * track group sched_entity load average for task_h_load calc in migration 2724 * track group sched_entity load average for task_h_load calc in migration
2704 */ 2725 */
2705 __update_load_avg(now, cpu, &se->avg, 2726 __update_load_avg(now, cpu, &se->avg,
2706 se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL); 2727 se->on_rq * scale_load_down(se->load.weight),
2728 cfs_rq->curr == se, NULL);
2707 2729
2708 if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg) 2730 if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
2709 update_tg_load_avg(cfs_rq, 0); 2731 update_tg_load_avg(cfs_rq, 0);
2710} 2732}
2711 2733
2734static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2735{
2736 if (!sched_feat(ATTACH_AGE_LOAD))
2737 goto skip_aging;
2738
2739 /*
2740 * If we got migrated (either between CPUs or between cgroups) we'll
2741 * have aged the average right before clearing @last_update_time.
2742 */
2743 if (se->avg.last_update_time) {
2744 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2745 &se->avg, 0, 0, NULL);
2746
2747 /*
2748 * XXX: we could have just aged the entire load away if we've been
2749 * absent from the fair class for too long.
2750 */
2751 }
2752
2753skip_aging:
2754 se->avg.last_update_time = cfs_rq->avg.last_update_time;
2755 cfs_rq->avg.load_avg += se->avg.load_avg;
2756 cfs_rq->avg.load_sum += se->avg.load_sum;
2757 cfs_rq->avg.util_avg += se->avg.util_avg;
2758 cfs_rq->avg.util_sum += se->avg.util_sum;
2759}
2760
2761static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2762{
2763 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2764 &se->avg, se->on_rq * scale_load_down(se->load.weight),
2765 cfs_rq->curr == se, NULL);
2766
2767 cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
2768 cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
2769 cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
2770 cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
2771}
2772
2712/* Add the load generated by se into cfs_rq's load average */ 2773/* Add the load generated by se into cfs_rq's load average */
2713static inline void 2774static inline void
2714enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2775enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2715{ 2776{
2716 struct sched_avg *sa = &se->avg; 2777 struct sched_avg *sa = &se->avg;
2717 u64 now = cfs_rq_clock_task(cfs_rq); 2778 u64 now = cfs_rq_clock_task(cfs_rq);
2718 int migrated = 0, decayed; 2779 int migrated, decayed;
2719 2780
2720 if (sa->last_update_time == 0) { 2781 migrated = !sa->last_update_time;
2721 sa->last_update_time = now; 2782 if (!migrated) {
2722 migrated = 1;
2723 }
2724 else {
2725 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa, 2783 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2726 se->on_rq * scale_load_down(se->load.weight), 2784 se->on_rq * scale_load_down(se->load.weight),
2727 cfs_rq->curr == se, NULL); 2785 cfs_rq->curr == se, NULL);
@@ -2732,12 +2790,8 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2732 cfs_rq->runnable_load_avg += sa->load_avg; 2790 cfs_rq->runnable_load_avg += sa->load_avg;
2733 cfs_rq->runnable_load_sum += sa->load_sum; 2791 cfs_rq->runnable_load_sum += sa->load_sum;
2734 2792
2735 if (migrated) { 2793 if (migrated)
2736 cfs_rq->avg.load_avg += sa->load_avg; 2794 attach_entity_load_avg(cfs_rq, se);
2737 cfs_rq->avg.load_sum += sa->load_sum;
2738 cfs_rq->avg.util_avg += sa->util_avg;
2739 cfs_rq->avg.util_sum += sa->util_sum;
2740 }
2741 2795
2742 if (decayed || migrated) 2796 if (decayed || migrated)
2743 update_tg_load_avg(cfs_rq, 0); 2797 update_tg_load_avg(cfs_rq, 0);
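
attach_entity_load_avg() and detach_entity_load_avg() factor out the add/subtract symmetry that enqueue, migration and the class/group switch paths all need; the subtraction clamps at zero (the max_t() calls) so rounding can never drive the cfs_rq totals negative. A toy version with stand-in types:

#include <stdio.h>

struct avg_sketch { long load_avg; long long load_sum; long util_avg; long util_sum; };

static void attach_sketch(struct avg_sketch *cfs, const struct avg_sketch *se)
{
	cfs->load_avg += se->load_avg;
	cfs->load_sum += se->load_sum;
	cfs->util_avg += se->util_avg;
	cfs->util_sum += se->util_sum;
}

#define sub_clamp0(x, y) ((x) = ((x) > (y) ? (x) - (y) : 0))

static void detach_sketch(struct avg_sketch *cfs, const struct avg_sketch *se)
{
	sub_clamp0(cfs->load_avg, se->load_avg);
	sub_clamp0(cfs->load_sum, se->load_sum);
	sub_clamp0(cfs->util_avg, se->util_avg);
	sub_clamp0(cfs->util_sum, se->util_sum);
}

int main(void)
{
	struct avg_sketch cfs = { 0, 0, 0, 0 };
	struct avg_sketch se  = { 512, 512LL * 47742, 256, 256 * 47742 };

	attach_sketch(&cfs, &se);
	detach_sketch(&cfs, &se);
	printf("load_avg=%ld util_avg=%ld\n", cfs.load_avg, cfs.util_avg);	/* back to 0 0 */
	return 0;
}
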
@@ -2752,7 +2806,7 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2752 cfs_rq->runnable_load_avg = 2806 cfs_rq->runnable_load_avg =
2753 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0); 2807 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
2754 cfs_rq->runnable_load_sum = 2808 cfs_rq->runnable_load_sum =
2755 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0); 2809 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
2756} 2810}
2757 2811
2758/* 2812/*
@@ -2820,6 +2874,11 @@ static inline void
2820dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 2874dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
2821static inline void remove_entity_load_avg(struct sched_entity *se) {} 2875static inline void remove_entity_load_avg(struct sched_entity *se) {}
2822 2876
2877static inline void
2878attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
2879static inline void
2880detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
2881
2823static inline int idle_balance(struct rq *rq) 2882static inline int idle_balance(struct rq *rq)
2824{ 2883{
2825 return 0; 2884 return 0;
@@ -4816,32 +4875,39 @@ next:
4816done: 4875done:
4817 return target; 4876 return target;
4818} 4877}
4878
4819/* 4879/*
4820 * get_cpu_usage returns the amount of capacity of a CPU that is used by CFS 4880 * cpu_util returns the amount of capacity of a CPU that is used by CFS
4821 * tasks. The unit of the return value must be the one of capacity so we can 4881 * tasks. The unit of the return value must be the one of capacity so we can
4822 * compare the usage with the capacity of the CPU that is available for CFS 4882 * compare the utilization with the capacity of the CPU that is available for
4823 * task (ie cpu_capacity). 4883 * CFS task (ie cpu_capacity).
4824 * cfs.avg.util_avg is the sum of running time of runnable tasks on a 4884 *
4825 * CPU. It represents the amount of utilization of a CPU in the range 4885 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
4826 * [0..SCHED_LOAD_SCALE]. The usage of a CPU can't be higher than the full 4886 * recent utilization of currently non-runnable tasks on a CPU. It represents
4827 * capacity of the CPU because it's about the running time on this CPU. 4887 * the amount of utilization of a CPU in the range [0..capacity_orig] where
4828 * Nevertheless, cfs.avg.util_avg can be higher than SCHED_LOAD_SCALE 4888 * capacity_orig is the cpu_capacity available at the highest frequency
4829 * because of unfortunate rounding in util_avg or just 4889 * (arch_scale_freq_capacity()).
4830 * after migrating tasks until the average stabilizes with the new running 4890 * The utilization of a CPU converges towards a sum equal to or less than the
4831 * time. So we need to check that the usage stays into the range 4891 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
4832 * [0..cpu_capacity_orig] and cap if necessary. 4892 * the running time on this CPU scaled by capacity_curr.
4833 * Without capping the usage, a group could be seen as overloaded (CPU0 usage 4893 *
4834 * at 121% + CPU1 usage at 80%) whereas CPU1 has 20% of available capacity 4894 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
4895 * higher than capacity_orig because of unfortunate rounding in
4896 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
4897 * the average stabilizes with the new running time. We need to check that the
4898 * utilization stays within the range of [0..capacity_orig] and cap it if
4899 * necessary. Without utilization capping, a group could be seen as overloaded
4900 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
4901 * available capacity. We allow utilization to overshoot capacity_curr (but not
4902 * capacity_orig) as it is useful for predicting the capacity required after task
4903 * migrations (scheduler-driven DVFS).
4835 */ 4904 */
4836static int get_cpu_usage(int cpu) 4905static int cpu_util(int cpu)
4837{ 4906{
4838 unsigned long usage = cpu_rq(cpu)->cfs.avg.util_avg; 4907 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
4839 unsigned long capacity = capacity_orig_of(cpu); 4908 unsigned long capacity = capacity_orig_of(cpu);
4840 4909
4841 if (usage >= SCHED_LOAD_SCALE) 4910 return (util >= capacity) ? capacity : util;
4842 return capacity;
4843
4844 return (usage * capacity) >> SCHED_LOAD_SHIFT;
4845} 4911}
4846 4912
4847/* 4913/*
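
Because util_avg is now accumulated in capacity units (see the cap_scale() hunks earlier in this diff), the old rescale through SCHED_LOAD_SHIFT collapses into a plain clamp. Both forms side by side, with 1024 playing the role of SCHED_LOAD_SCALE and a deliberately small capacity_orig; the two functions take inputs in different units, so the point is the shape of the code, not identical call sites:

#include <stdio.h>

/* Old form: usage tracked in 0..1024 load-scale units, rescaled into capacity. */
static unsigned long old_get_cpu_usage(unsigned long usage, unsigned long capacity_orig)
{
	if (usage >= 1024)
		return capacity_orig;
	return (usage * capacity_orig) >> 10;
}

/* New form: util_avg already arrives in capacity units, so only clamp it. */
static unsigned long new_cpu_util(unsigned long util, unsigned long capacity_orig)
{
	return (util >= capacity_orig) ? capacity_orig : util;
}

int main(void)
{
	unsigned long capacity_orig = 430;	/* e.g. a LITTLE CPU */

	/* ~94% busy, expressed in the old 0..1024 scale vs. the new capacity scale: */
	printf("old: %lu  new: %lu\n",
	       old_get_cpu_usage(960, capacity_orig),	/* 960/1024 of scale -> 403      */
	       new_cpu_util(403, capacity_orig));	/* already in capacity units     */
	return 0;
}
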
@@ -4944,7 +5010,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
4944 * previous cpu. However, the caller only guarantees p->pi_lock is held; no 5010 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4945 * other assumptions, including the state of rq->lock, should be made. 5011 * other assumptions, including the state of rq->lock, should be made.
4946 */ 5012 */
4947static void migrate_task_rq_fair(struct task_struct *p, int next_cpu) 5013static void migrate_task_rq_fair(struct task_struct *p)
4948{ 5014{
4949 /* 5015 /*
4950 * We are supposed to update the task to "current" time, then its up to date 5016 * We are supposed to update the task to "current" time, then its up to date
@@ -5524,10 +5590,10 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5524 unsigned long src_faults, dst_faults; 5590 unsigned long src_faults, dst_faults;
5525 int src_nid, dst_nid; 5591 int src_nid, dst_nid;
5526 5592
5527 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 5593 if (!static_branch_likely(&sched_numa_balancing))
5528 return -1; 5594 return -1;
5529 5595
5530 if (!sched_feat(NUMA)) 5596 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
5531 return -1; 5597 return -1;
5532 5598
5533 src_nid = cpu_to_node(env->src_cpu); 5599 src_nid = cpu_to_node(env->src_cpu);
@@ -5933,7 +5999,7 @@ struct sg_lb_stats {
5933 unsigned long sum_weighted_load; /* Weighted load of group's tasks */ 5999 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
5934 unsigned long load_per_task; 6000 unsigned long load_per_task;
5935 unsigned long group_capacity; 6001 unsigned long group_capacity;
5936 unsigned long group_usage; /* Total usage of the group */ 6002 unsigned long group_util; /* Total utilization of the group */
5937 unsigned int sum_nr_running; /* Nr tasks running in the group */ 6003 unsigned int sum_nr_running; /* Nr tasks running in the group */
5938 unsigned int idle_cpus; 6004 unsigned int idle_cpus;
5939 unsigned int group_weight; 6005 unsigned int group_weight;
@@ -6009,19 +6075,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
6009 return load_idx; 6075 return load_idx;
6010} 6076}
6011 6077
6012static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
6013{
6014 if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
6015 return sd->smt_gain / sd->span_weight;
6016
6017 return SCHED_CAPACITY_SCALE;
6018}
6019
6020unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
6021{
6022 return default_scale_cpu_capacity(sd, cpu);
6023}
6024
6025static unsigned long scale_rt_capacity(int cpu) 6078static unsigned long scale_rt_capacity(int cpu)
6026{ 6079{
6027 struct rq *rq = cpu_rq(cpu); 6080 struct rq *rq = cpu_rq(cpu);
@@ -6051,16 +6104,9 @@ static unsigned long scale_rt_capacity(int cpu)
6051 6104
6052static void update_cpu_capacity(struct sched_domain *sd, int cpu) 6105static void update_cpu_capacity(struct sched_domain *sd, int cpu)
6053{ 6106{
6054 unsigned long capacity = SCHED_CAPACITY_SCALE; 6107 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
6055 struct sched_group *sdg = sd->groups; 6108 struct sched_group *sdg = sd->groups;
6056 6109
6057 if (sched_feat(ARCH_CAPACITY))
6058 capacity *= arch_scale_cpu_capacity(sd, cpu);
6059 else
6060 capacity *= default_scale_cpu_capacity(sd, cpu);
6061
6062 capacity >>= SCHED_CAPACITY_SHIFT;
6063
6064 cpu_rq(cpu)->cpu_capacity_orig = capacity; 6110 cpu_rq(cpu)->cpu_capacity_orig = capacity;
6065 6111
6066 capacity *= scale_rt_capacity(cpu); 6112 capacity *= scale_rt_capacity(cpu);
@@ -6186,8 +6232,8 @@ static inline int sg_imbalanced(struct sched_group *group)
6186 * group_has_capacity returns true if the group has spare capacity that could 6232 * group_has_capacity returns true if the group has spare capacity that could
6187 * be used by some tasks. 6233 * be used by some tasks.
6188 * We consider that a group has spare capacity if the * number of task is 6234 * We consider that a group has spare capacity if the * number of task is
6189 * smaller than the number of CPUs or if the usage is lower than the available 6235 * smaller than the number of CPUs or if the utilization is lower than the
6190 * capacity for CFS tasks. 6236 * available capacity for CFS tasks.
6191 * For the latter, we use a threshold to stabilize the state, to take into 6237 * For the latter, we use a threshold to stabilize the state, to take into
6192 * account the variance of the tasks' load and to return true if the available 6238 * account the variance of the tasks' load and to return true if the available
6193 * capacity is meaningful for the load balancer. 6239 * capacity is meaningful for the load balancer.
@@ -6201,7 +6247,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
6201 return true; 6247 return true;
6202 6248
6203 if ((sgs->group_capacity * 100) > 6249 if ((sgs->group_capacity * 100) >
6204 (sgs->group_usage * env->sd->imbalance_pct)) 6250 (sgs->group_util * env->sd->imbalance_pct))
6205 return true; 6251 return true;
6206 6252
6207 return false; 6253 return false;
@@ -6222,15 +6268,15 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6222 return false; 6268 return false;
6223 6269
6224 if ((sgs->group_capacity * 100) < 6270 if ((sgs->group_capacity * 100) <
6225 (sgs->group_usage * env->sd->imbalance_pct)) 6271 (sgs->group_util * env->sd->imbalance_pct))
6226 return true; 6272 return true;
6227 6273
6228 return false; 6274 return false;
6229} 6275}
6230 6276
6231static enum group_type group_classify(struct lb_env *env, 6277static inline enum
6232 struct sched_group *group, 6278group_type group_classify(struct sched_group *group,
6233 struct sg_lb_stats *sgs) 6279 struct sg_lb_stats *sgs)
6234{ 6280{
6235 if (sgs->group_no_capacity) 6281 if (sgs->group_no_capacity)
6236 return group_overloaded; 6282 return group_overloaded;
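
Renaming group_usage to group_util leaves the threshold arithmetic untouched: a group counts as out of capacity once util * imbalance_pct exceeds capacity * 100 (the full check in the hunk above additionally requires more runnable tasks than CPUs). A worked example with imbalance_pct = 125, a commonly used sched-domain default; all numbers are illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool group_is_overloaded_sketch(unsigned long group_capacity,
				       unsigned long group_util,
				       unsigned int imbalance_pct)
{
	return group_capacity * 100 < group_util * imbalance_pct;
}

int main(void)
{
	/* Two CPUs of capacity 1024 => 2048; util must exceed ~80% of that. */
	printf("%d %d\n",
	       group_is_overloaded_sketch(2048, 1600, 125),	/* 200000 < 204800 ? no  -> 0 */
	       group_is_overloaded_sketch(2048, 1700, 125));	/* 212500 > 204800       -> 1 */
	return 0;
}
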
@@ -6270,7 +6316,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
6270 load = source_load(i, load_idx); 6316 load = source_load(i, load_idx);
6271 6317
6272 sgs->group_load += load; 6318 sgs->group_load += load;
6273 sgs->group_usage += get_cpu_usage(i); 6319 sgs->group_util += cpu_util(i);
6274 sgs->sum_nr_running += rq->cfs.h_nr_running; 6320 sgs->sum_nr_running += rq->cfs.h_nr_running;
6275 6321
6276 if (rq->nr_running > 1) 6322 if (rq->nr_running > 1)
@@ -6295,7 +6341,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
6295 sgs->group_weight = group->group_weight; 6341 sgs->group_weight = group->group_weight;
6296 6342
6297 sgs->group_no_capacity = group_is_overloaded(env, sgs); 6343 sgs->group_no_capacity = group_is_overloaded(env, sgs);
6298 sgs->group_type = group_classify(env, group, sgs); 6344 sgs->group_type = group_classify(group, sgs);
6299} 6345}
6300 6346
6301/** 6347/**
@@ -6429,7 +6475,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
6429 group_has_capacity(env, &sds->local_stat) && 6475 group_has_capacity(env, &sds->local_stat) &&
6430 (sgs->sum_nr_running > 1)) { 6476 (sgs->sum_nr_running > 1)) {
6431 sgs->group_no_capacity = 1; 6477 sgs->group_no_capacity = 1;
6432 sgs->group_type = group_overloaded; 6478 sgs->group_type = group_classify(sg, sgs);
6433 } 6479 }
6434 6480
6435 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 6481 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
@@ -7609,8 +7655,22 @@ out:
7609 * When the cpu is attached to null domain for ex, it will not be 7655 * When the cpu is attached to null domain for ex, it will not be
7610 * updated. 7656 * updated.
7611 */ 7657 */
7612 if (likely(update_next_balance)) 7658 if (likely(update_next_balance)) {
7613 rq->next_balance = next_balance; 7659 rq->next_balance = next_balance;
7660
7661#ifdef CONFIG_NO_HZ_COMMON
7662 /*
7663 * If this CPU has been elected to perform the nohz idle
7664 * balance. Other idle CPUs have already rebalanced with
7665 * nohz_idle_balance() and nohz.next_balance has been
7666 * updated accordingly. This CPU is now running the idle load
7667 * balance for itself and we need to update the
7668 * nohz.next_balance accordingly.
7669 */
7670 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
7671 nohz.next_balance = rq->next_balance;
7672#endif
7673 }
7614} 7674}
7615 7675
7616#ifdef CONFIG_NO_HZ_COMMON 7676#ifdef CONFIG_NO_HZ_COMMON
@@ -7623,6 +7683,9 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7623 int this_cpu = this_rq->cpu; 7683 int this_cpu = this_rq->cpu;
7624 struct rq *rq; 7684 struct rq *rq;
7625 int balance_cpu; 7685 int balance_cpu;
7686 /* Earliest time when we have to do rebalance again */
7687 unsigned long next_balance = jiffies + 60*HZ;
7688 int update_next_balance = 0;
7626 7689
7627 if (idle != CPU_IDLE || 7690 if (idle != CPU_IDLE ||
7628 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) 7691 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
@@ -7654,10 +7717,19 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7654 rebalance_domains(rq, CPU_IDLE); 7717 rebalance_domains(rq, CPU_IDLE);
7655 } 7718 }
7656 7719
7657 if (time_after(this_rq->next_balance, rq->next_balance)) 7720 if (time_after(next_balance, rq->next_balance)) {
7658 this_rq->next_balance = rq->next_balance; 7721 next_balance = rq->next_balance;
7722 update_next_balance = 1;
7723 }
7659 } 7724 }
7660 nohz.next_balance = this_rq->next_balance; 7725
7726 /*
7727 * next_balance will be updated only when there is a need.
7728 * When the CPU is attached to null domain for ex, it will not be
7729 * updated.
7730 */
7731 if (likely(update_next_balance))
7732 nohz.next_balance = next_balance;
7661end: 7733end:
7662 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); 7734 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7663} 7735}
@@ -7810,7 +7882,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
7810 entity_tick(cfs_rq, se, queued); 7882 entity_tick(cfs_rq, se, queued);
7811 } 7883 }
7812 7884
7813 if (numabalancing_enabled) 7885 if (static_branch_unlikely(&sched_numa_balancing))
7814 task_tick_numa(rq, curr); 7886 task_tick_numa(rq, curr);
7815} 7887}
7816 7888
@@ -7886,21 +7958,39 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
7886 check_preempt_curr(rq, p, 0); 7958 check_preempt_curr(rq, p, 0);
7887} 7959}
7888 7960
7889static void switched_from_fair(struct rq *rq, struct task_struct *p) 7961static inline bool vruntime_normalized(struct task_struct *p)
7890{ 7962{
7891 struct sched_entity *se = &p->se; 7963 struct sched_entity *se = &p->se;
7892 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7893 7964
7894 /* 7965 /*
7895 * Ensure the task's vruntime is normalized, so that when it's 7966 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
7896 * switched back to the fair class the enqueue_entity(.flags=0) will 7967 * the dequeue_entity(.flags=0) will already have normalized the
7897 * do the right thing. 7968 * vruntime.
7969 */
7970 if (p->on_rq)
7971 return true;
7972
7973 /*
7974 * When !on_rq, vruntime of the task has usually NOT been normalized.
7975 * But there are some cases where it has already been normalized:
7898 * 7976 *
7899 * If it's queued, then the dequeue_entity(.flags=0) will already 7977 * - A forked child which is waiting for being woken up by
7900 * have normalized the vruntime, if it's !queued, then only when 7978 * wake_up_new_task().
7901 * the task is sleeping will it still have non-normalized vruntime. 7979 * - A task which has been woken up by try_to_wake_up() and
7980 * waiting for actually being woken up by sched_ttwu_pending().
7902 */ 7981 */
7903 if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) { 7982 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
7983 return true;
7984
7985 return false;
7986}
7987
7988static void detach_task_cfs_rq(struct task_struct *p)
7989{
7990 struct sched_entity *se = &p->se;
7991 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7992
7993 if (!vruntime_normalized(p)) {
7904 /* 7994 /*
7905 * Fix up our vruntime so that the current sleep doesn't 7995 * Fix up our vruntime so that the current sleep doesn't
7906 * cause 'unlimited' sleep bonus. 7996 * cause 'unlimited' sleep bonus.
@@ -7909,28 +7999,14 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
7909 se->vruntime -= cfs_rq->min_vruntime; 7999 se->vruntime -= cfs_rq->min_vruntime;
7910 } 8000 }
7911 8001
7912#ifdef CONFIG_SMP
7913 /* Catch up with the cfs_rq and remove our load when we leave */ 8002 /* Catch up with the cfs_rq and remove our load when we leave */
7914 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg, 8003 detach_entity_load_avg(cfs_rq, se);
7915 se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
7916
7917 cfs_rq->avg.load_avg =
7918 max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
7919 cfs_rq->avg.load_sum =
7920 max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
7921 cfs_rq->avg.util_avg =
7922 max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
7923 cfs_rq->avg.util_sum =
7924 max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
7925#endif
7926} 8004}
7927 8005
7928/* 8006static void attach_task_cfs_rq(struct task_struct *p)
7929 * We switched to the sched_fair class.
7930 */
7931static void switched_to_fair(struct rq *rq, struct task_struct *p)
7932{ 8007{
7933 struct sched_entity *se = &p->se; 8008 struct sched_entity *se = &p->se;
8009 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7934 8010
7935#ifdef CONFIG_FAIR_GROUP_SCHED 8011#ifdef CONFIG_FAIR_GROUP_SCHED
7936 /* 8012 /*
@@ -7940,31 +8016,33 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
7940 se->depth = se->parent ? se->parent->depth + 1 : 0; 8016 se->depth = se->parent ? se->parent->depth + 1 : 0;
7941#endif 8017#endif
7942 8018
7943 if (!task_on_rq_queued(p)) { 8019 /* Synchronize task with its cfs_rq */
8020 attach_entity_load_avg(cfs_rq, se);
8021
8022 if (!vruntime_normalized(p))
8023 se->vruntime += cfs_rq->min_vruntime;
8024}
8025
8026static void switched_from_fair(struct rq *rq, struct task_struct *p)
8027{
8028 detach_task_cfs_rq(p);
8029}
8030
8031static void switched_to_fair(struct rq *rq, struct task_struct *p)
8032{
8033 attach_task_cfs_rq(p);
7944 8034
8035 if (task_on_rq_queued(p)) {
7945 /* 8036 /*
7946 * Ensure the task has a non-normalized vruntime when it is switched 8037 * We were most likely switched from sched_rt, so
7947 * back to the fair class with !queued, so that enqueue_entity() at 8038 * kick off the schedule if running, otherwise just see
7948 * wake-up time will do the right thing. 8039 * if we can still preempt the current task.
7949 *
7950 * If it's queued, then the enqueue_entity(.flags=0) makes the task
7951 * has non-normalized vruntime, if it's !queued, then it still has
7952 * normalized vruntime.
7953 */ 8040 */
7954 if (p->state != TASK_RUNNING) 8041 if (rq->curr == p)
7955 se->vruntime += cfs_rq_of(se)->min_vruntime; 8042 resched_curr(rq);
7956 return; 8043 else
8044 check_preempt_curr(rq, p, 0);
7957 } 8045 }
7958
7959 /*
7960 * We were most likely switched from sched_rt, so
7961 * kick off the schedule if running, otherwise just see
7962 * if we can still preempt the current task.
7963 */
7964 if (rq->curr == p)
7965 resched_curr(rq);
7966 else
7967 check_preempt_curr(rq, p, 0);
7968} 8046}
7969 8047
7970/* Account for a task changing its policy or group. 8048/* Account for a task changing its policy or group.
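
The refactor above centralizes the vruntime handling in detach_task_cfs_rq()/attach_task_cfs_rq(): when vruntime_normalized() says the task still carries an absolute value, detach makes it relative to the old cfs_rq's min_vruntime and attach rebases it onto the new one, so a long sleep earns no "unlimited" bonus. A toy sketch of that arithmetic with hypothetical numbers, not kernel code:

#include <stdio.h>

struct toy_se { unsigned long long vruntime; };
struct toy_cfs_rq { unsigned long long min_vruntime; };

static void toy_detach(struct toy_se *se, struct toy_cfs_rq *rq)
{
	/* store a relative value while the task is off the runqueue */
	se->vruntime -= rq->min_vruntime;
}

static void toy_attach(struct toy_se *se, struct toy_cfs_rq *rq)
{
	/* rebase onto the destination runqueue's clock */
	se->vruntime += rq->min_vruntime;
}

int main(void)
{
	struct toy_se se = { .vruntime = 1050 };
	struct toy_cfs_rq src = { .min_vruntime = 1000 };
	struct toy_cfs_rq dst = { .min_vruntime = 5000 };

	toy_detach(&se, &src);          /* 50: relative progress kept */
	toy_attach(&se, &dst);          /* 5050: absolute again on dst */
	printf("%llu\n", se.vruntime);
	return 0;
}
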
@@ -7999,56 +8077,16 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
7999} 8077}
8000 8078
8001#ifdef CONFIG_FAIR_GROUP_SCHED 8079#ifdef CONFIG_FAIR_GROUP_SCHED
8002static void task_move_group_fair(struct task_struct *p, int queued) 8080static void task_move_group_fair(struct task_struct *p)
8003{ 8081{
8004 struct sched_entity *se = &p->se; 8082 detach_task_cfs_rq(p);
8005 struct cfs_rq *cfs_rq;
8006
8007 /*
8008 * If the task was not on the rq at the time of this cgroup movement
8009 * it must have been asleep, sleeping tasks keep their ->vruntime
8010 * absolute on their old rq until wakeup (needed for the fair sleeper
8011 * bonus in place_entity()).
8012 *
8013 * If it was on the rq, we've just 'preempted' it, which does convert
8014 * ->vruntime to a relative base.
8015 *
8016 * Make sure both cases convert their relative position when migrating
8017 * to another cgroup's rq. This does somewhat interfere with the
8018 * fair sleeper stuff for the first placement, but who cares.
8019 */
8020 /*
8021 * When !queued, vruntime of the task has usually NOT been normalized.
8022 * But there are some cases where it has already been normalized:
8023 *
8024 * - Moving a forked child which is waiting for being woken up by
8025 * wake_up_new_task().
8026 * - Moving a task which has been woken up by try_to_wake_up() and
8027 * waiting for actually being woken up by sched_ttwu_pending().
8028 *
8029 * To prevent boost or penalty in the new cfs_rq caused by delta
8030 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
8031 */
8032 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
8033 queued = 1;
8034
8035 if (!queued)
8036 se->vruntime -= cfs_rq_of(se)->min_vruntime;
8037 set_task_rq(p, task_cpu(p)); 8083 set_task_rq(p, task_cpu(p));
8038 se->depth = se->parent ? se->parent->depth + 1 : 0;
8039 if (!queued) {
8040 cfs_rq = cfs_rq_of(se);
8041 se->vruntime += cfs_rq->min_vruntime;
8042 8084
8043#ifdef CONFIG_SMP 8085#ifdef CONFIG_SMP
8044 /* Virtually synchronize task with its new cfs_rq */ 8086 /* Tell se's cfs_rq has been changed -- migrated */
8045 p->se.avg.last_update_time = cfs_rq->avg.last_update_time; 8087 p->se.avg.last_update_time = 0;
8046 cfs_rq->avg.load_avg += p->se.avg.load_avg;
8047 cfs_rq->avg.load_sum += p->se.avg.load_sum;
8048 cfs_rq->avg.util_avg += p->se.avg.util_avg;
8049 cfs_rq->avg.util_sum += p->se.avg.util_sum;
8050#endif 8088#endif
8051 } 8089 attach_task_cfs_rq(p);
8052} 8090}
8053 8091
8054void free_fair_sched_group(struct task_group *tg) 8092void free_fair_sched_group(struct task_group *tg)
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 83a50e7ca533..69631fa46c2f 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -36,11 +36,6 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
36 */ 36 */
37SCHED_FEAT(WAKEUP_PREEMPTION, true) 37SCHED_FEAT(WAKEUP_PREEMPTION, true)
38 38
39/*
40 * Use arch dependent cpu capacity functions
41 */
42SCHED_FEAT(ARCH_CAPACITY, true)
43
44SCHED_FEAT(HRTICK, false) 39SCHED_FEAT(HRTICK, false)
45SCHED_FEAT(DOUBLE_TICK, false) 40SCHED_FEAT(DOUBLE_TICK, false)
46SCHED_FEAT(LB_BIAS, true) 41SCHED_FEAT(LB_BIAS, true)
@@ -72,19 +67,5 @@ SCHED_FEAT(RT_PUSH_IPI, true)
72SCHED_FEAT(FORCE_SD_OVERLAP, false) 67SCHED_FEAT(FORCE_SD_OVERLAP, false)
73SCHED_FEAT(RT_RUNTIME_SHARE, true) 68SCHED_FEAT(RT_RUNTIME_SHARE, true)
74SCHED_FEAT(LB_MIN, false) 69SCHED_FEAT(LB_MIN, false)
70SCHED_FEAT(ATTACH_AGE_LOAD, true)
75 71
76/*
77 * Apply the automatic NUMA scheduling policy. Enabled automatically
78 * at runtime if running on a NUMA machine. Can be controlled via
79 * numa_balancing=
80 */
81#ifdef CONFIG_NUMA_BALANCING
82
83/*
84 * NUMA will favor moving tasks towards nodes where a higher number of
85 * hinting faults are recorded during active load balancing. It will
86 * resist moving tasks towards nodes where a lower number of hinting
87 * faults have been recorded.
88 */
89SCHED_FEAT(NUMA, true)
90#endif
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8f177c73ae19..4a2ef5a02fd3 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -57,9 +57,11 @@ static inline int cpu_idle_poll(void)
57 rcu_idle_enter(); 57 rcu_idle_enter();
58 trace_cpu_idle_rcuidle(0, smp_processor_id()); 58 trace_cpu_idle_rcuidle(0, smp_processor_id());
59 local_irq_enable(); 59 local_irq_enable();
60 stop_critical_timings();
60 while (!tif_need_resched() && 61 while (!tif_need_resched() &&
61 (cpu_idle_force_poll || tick_check_broadcast_expired())) 62 (cpu_idle_force_poll || tick_check_broadcast_expired()))
62 cpu_relax(); 63 cpu_relax();
64 start_critical_timings();
63 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 65 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
64 rcu_idle_exit(); 66 rcu_idle_exit();
65 return 1; 67 return 1;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d2ea59364a1c..e3cc16312046 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -635,11 +635,11 @@ bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
635/* 635/*
636 * We ran out of runtime, see if we can borrow some from our neighbours. 636 * We ran out of runtime, see if we can borrow some from our neighbours.
637 */ 637 */
638static int do_balance_runtime(struct rt_rq *rt_rq) 638static void do_balance_runtime(struct rt_rq *rt_rq)
639{ 639{
640 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 640 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
641 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; 641 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
642 int i, weight, more = 0; 642 int i, weight;
643 u64 rt_period; 643 u64 rt_period;
644 644
645 weight = cpumask_weight(rd->span); 645 weight = cpumask_weight(rd->span);
@@ -673,7 +673,6 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
673 diff = rt_period - rt_rq->rt_runtime; 673 diff = rt_period - rt_rq->rt_runtime;
674 iter->rt_runtime -= diff; 674 iter->rt_runtime -= diff;
675 rt_rq->rt_runtime += diff; 675 rt_rq->rt_runtime += diff;
676 more = 1;
677 if (rt_rq->rt_runtime == rt_period) { 676 if (rt_rq->rt_runtime == rt_period) {
678 raw_spin_unlock(&iter->rt_runtime_lock); 677 raw_spin_unlock(&iter->rt_runtime_lock);
679 break; 678 break;
@@ -683,8 +682,6 @@ next:
683 raw_spin_unlock(&iter->rt_runtime_lock); 682 raw_spin_unlock(&iter->rt_runtime_lock);
684 } 683 }
685 raw_spin_unlock(&rt_b->rt_runtime_lock); 684 raw_spin_unlock(&rt_b->rt_runtime_lock);
686
687 return more;
688} 685}
689 686
690/* 687/*
@@ -796,26 +793,19 @@ static void __enable_runtime(struct rq *rq)
796 } 793 }
797} 794}
798 795
799static int balance_runtime(struct rt_rq *rt_rq) 796static void balance_runtime(struct rt_rq *rt_rq)
800{ 797{
801 int more = 0;
802
803 if (!sched_feat(RT_RUNTIME_SHARE)) 798 if (!sched_feat(RT_RUNTIME_SHARE))
804 return more; 799 return;
805 800
806 if (rt_rq->rt_time > rt_rq->rt_runtime) { 801 if (rt_rq->rt_time > rt_rq->rt_runtime) {
807 raw_spin_unlock(&rt_rq->rt_runtime_lock); 802 raw_spin_unlock(&rt_rq->rt_runtime_lock);
808 more = do_balance_runtime(rt_rq); 803 do_balance_runtime(rt_rq);
809 raw_spin_lock(&rt_rq->rt_runtime_lock); 804 raw_spin_lock(&rt_rq->rt_runtime_lock);
810 } 805 }
811
812 return more;
813} 806}
814#else /* !CONFIG_SMP */ 807#else /* !CONFIG_SMP */
815static inline int balance_runtime(struct rt_rq *rt_rq) 808static inline void balance_runtime(struct rt_rq *rt_rq) {}
816{
817 return 0;
818}
819#endif /* CONFIG_SMP */ 809#endif /* CONFIG_SMP */
820 810
821static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) 811static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d2a119c7ad9..efd3bfc7e347 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -84,6 +84,10 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
84 */ 84 */
85#define RUNTIME_INF ((u64)~0ULL) 85#define RUNTIME_INF ((u64)~0ULL)
86 86
87static inline int idle_policy(int policy)
88{
89 return policy == SCHED_IDLE;
90}
87static inline int fair_policy(int policy) 91static inline int fair_policy(int policy)
88{ 92{
89 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 93 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
@@ -98,6 +102,11 @@ static inline int dl_policy(int policy)
98{ 102{
99 return policy == SCHED_DEADLINE; 103 return policy == SCHED_DEADLINE;
100} 104}
105static inline bool valid_policy(int policy)
106{
107 return idle_policy(policy) || fair_policy(policy) ||
108 rt_policy(policy) || dl_policy(policy);
109}
101 110
102static inline int task_has_rt_policy(struct task_struct *p) 111static inline int task_has_rt_policy(struct task_struct *p)
103{ 112{
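
The new idle_policy() and valid_policy() helpers above let core code validate a requested policy in one place instead of open-coding the list of classes. A standalone sketch of the same shape, using stand-in constants rather than the real uapi policy numbers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative policy constants, not the kernel uapi values. */
enum { POL_NORMAL, POL_FIFO, POL_RR, POL_BATCH, POL_IDLE, POL_DEADLINE,
       POL_BOGUS = 42 };

static bool idle_policy(int policy) { return policy == POL_IDLE; }
static bool fair_policy(int policy) { return policy == POL_NORMAL ||
					     policy == POL_BATCH; }
static bool rt_policy(int policy)   { return policy == POL_FIFO ||
					     policy == POL_RR; }
static bool dl_policy(int policy)   { return policy == POL_DEADLINE; }

static bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
	       rt_policy(policy) || dl_policy(policy);
}

int main(void)
{
	/* prints "1 0": SCHED_IDLE-style policy is valid, 42 is not */
	printf("%d %d\n", valid_policy(POL_IDLE), valid_policy(POL_BOGUS));
	return 0;
}
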
@@ -109,11 +118,6 @@ static inline int task_has_dl_policy(struct task_struct *p)
109 return dl_policy(p->policy); 118 return dl_policy(p->policy);
110} 119}
111 120
112static inline bool dl_time_before(u64 a, u64 b)
113{
114 return (s64)(a - b) < 0;
115}
116
117/* 121/*
118 * Tells if entity @a should preempt entity @b. 122 * Tells if entity @a should preempt entity @b.
119 */ 123 */
@@ -1003,17 +1007,7 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1003#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1007#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1004#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ 1008#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
1005 1009
1006#ifdef CONFIG_NUMA_BALANCING 1010extern struct static_key_false sched_numa_balancing;
1007#define sched_feat_numa(x) sched_feat(x)
1008#ifdef CONFIG_SCHED_DEBUG
1009#define numabalancing_enabled sched_feat_numa(NUMA)
1010#else
1011extern bool numabalancing_enabled;
1012#endif /* CONFIG_SCHED_DEBUG */
1013#else
1014#define sched_feat_numa(x) (0)
1015#define numabalancing_enabled (0)
1016#endif /* CONFIG_NUMA_BALANCING */
1017 1011
1018static inline u64 global_rt_period(void) 1012static inline u64 global_rt_period(void)
1019{ 1013{
@@ -1157,16 +1151,18 @@ static const u32 prio_to_wmult[40] = {
1157 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 1151 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1158}; 1152};
1159 1153
1160#define ENQUEUE_WAKEUP 1 1154#define ENQUEUE_WAKEUP 0x01
1161#define ENQUEUE_HEAD 2 1155#define ENQUEUE_HEAD 0x02
1162#ifdef CONFIG_SMP 1156#ifdef CONFIG_SMP
1163#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ 1157#define ENQUEUE_WAKING 0x04 /* sched_class::task_waking was called */
1164#else 1158#else
1165#define ENQUEUE_WAKING 0 1159#define ENQUEUE_WAKING 0x00
1166#endif 1160#endif
1167#define ENQUEUE_REPLENISH 8 1161#define ENQUEUE_REPLENISH 0x08
1162#define ENQUEUE_RESTORE 0x10
1168 1163
1169#define DEQUEUE_SLEEP 1 1164#define DEQUEUE_SLEEP 0x01
1165#define DEQUEUE_SAVE 0x02
1170 1166
1171#define RETRY_TASK ((void *)-1UL) 1167#define RETRY_TASK ((void *)-1UL)
1172 1168
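
The flag constants above move to hexadecimal and gain ENQUEUE_RESTORE/DEQUEUE_SAVE, which callers pass as a matched pair when a task is taken off the queue and put back around an attribute change. A small sketch of how such bit flags combine and are tested; the change_task() helper here is invented for illustration.

#include <stdio.h>

#define ENQUEUE_WAKEUP    0x01
#define ENQUEUE_HEAD      0x02
#define ENQUEUE_WAKING    0x04
#define ENQUEUE_REPLENISH 0x08
#define ENQUEUE_RESTORE   0x10

#define DEQUEUE_SLEEP     0x01
#define DEQUEUE_SAVE      0x02

static void dequeue_task(int flags)
{
	if (flags & DEQUEUE_SAVE)
		printf("dequeue: preserving state for a later restore\n");
}

static void enqueue_task(int flags)
{
	if (flags & ENQUEUE_RESTORE)
		printf("enqueue: restoring previously saved state\n");
	if (flags & ENQUEUE_WAKEUP)
		printf("enqueue: wakeup path\n");
}

/* Invented helper: dequeue, tweak the task, requeue, telling both halves
 * that this is a save/restore pair rather than a real sleep/wakeup. */
static void change_task(void)
{
	dequeue_task(DEQUEUE_SAVE);
	/* ... modify priority, group or policy here ... */
	enqueue_task(ENQUEUE_RESTORE);
}

int main(void)
{
	change_task();
	return 0;
}
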
@@ -1194,7 +1190,7 @@ struct sched_class {
1194 1190
1195#ifdef CONFIG_SMP 1191#ifdef CONFIG_SMP
1196 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 1192 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1197 void (*migrate_task_rq)(struct task_struct *p, int next_cpu); 1193 void (*migrate_task_rq)(struct task_struct *p);
1198 1194
1199 void (*task_waking) (struct task_struct *task); 1195 void (*task_waking) (struct task_struct *task);
1200 void (*task_woken) (struct rq *this_rq, struct task_struct *task); 1196 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
@@ -1227,7 +1223,7 @@ struct sched_class {
1227 void (*update_curr) (struct rq *rq); 1223 void (*update_curr) (struct rq *rq);
1228 1224
1229#ifdef CONFIG_FAIR_GROUP_SCHED 1225#ifdef CONFIG_FAIR_GROUP_SCHED
1230 void (*task_move_group) (struct task_struct *p, int on_rq); 1226 void (*task_move_group) (struct task_struct *p);
1231#endif 1227#endif
1232}; 1228};
1233 1229
@@ -1405,6 +1401,17 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1405} 1401}
1406#endif 1402#endif
1407 1403
1404#ifndef arch_scale_cpu_capacity
1405static __always_inline
1406unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1407{
1408 if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
1409 return sd->smt_gain / sd->span_weight;
1410
1411 return SCHED_CAPACITY_SCALE;
1412}
1413#endif
1414
1408static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) 1415static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1409{ 1416{
1410 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); 1417 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
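
The default arch_scale_cpu_capacity() added above shares sd->smt_gain between SMT siblings and otherwise reports one full CPU's worth of capacity. A worked example with hypothetical numbers (SCHED_CAPACITY_SCALE of 1024 and an smt_gain of 1178, roughly a 15% SMT bonus); this is a sketch of the rule, not the kernel function:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long cpu_capacity(unsigned long smt_gain,
				  unsigned int siblings, int shares_core)
{
	/* SMT domain with more than one sibling: split the gain */
	if (shares_core && siblings > 1)
		return smt_gain / siblings;
	return SCHED_CAPACITY_SCALE;
}

int main(void)
{
	printf("%lu\n", cpu_capacity(1178, 2, 1)); /* 589 per SMT thread */
	printf("%lu\n", cpu_capacity(1178, 1, 0)); /* 1024 for a full core */
	return 0;
}
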
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index a818cbc73e14..d264f59bff56 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -222,9 +222,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cp
222{ 222{
223 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); 223 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
224 224
225 if (ht->pre_unpark) 225 if (!ht->selfparking)
226 ht->pre_unpark(cpu); 226 kthread_unpark(tsk);
227 kthread_unpark(tsk);
228} 227}
229 228
230void smpboot_unpark_threads(unsigned int cpu) 229void smpboot_unpark_threads(unsigned int cpu)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 12484e5d5c88..867bc20e1ef1 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -73,21 +73,24 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
73 } 73 }
74} 74}
75 75
76static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
77 struct cpu_stop_work *work)
78{
79 list_add_tail(&work->list, &stopper->works);
80 wake_up_process(stopper->thread);
81}
82
76/* queue @work to @stopper. if offline, @work is completed immediately */ 83/* queue @work to @stopper. if offline, @work is completed immediately */
77static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) 84static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
78{ 85{
79 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); 86 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
80
81 unsigned long flags; 87 unsigned long flags;
82 88
83 spin_lock_irqsave(&stopper->lock, flags); 89 spin_lock_irqsave(&stopper->lock, flags);
84 90 if (stopper->enabled)
85 if (stopper->enabled) { 91 __cpu_stop_queue_work(stopper, work);
86 list_add_tail(&work->list, &stopper->works); 92 else
87 wake_up_process(stopper->thread);
88 } else
89 cpu_stop_signal_done(work->done, false); 93 cpu_stop_signal_done(work->done, false);
90
91 spin_unlock_irqrestore(&stopper->lock, flags); 94 spin_unlock_irqrestore(&stopper->lock, flags);
92} 95}
93 96
@@ -213,6 +216,31 @@ static int multi_cpu_stop(void *data)
213 return err; 216 return err;
214} 217}
215 218
219static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
220 int cpu2, struct cpu_stop_work *work2)
221{
222 struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
223 struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
224 int err;
225
226 lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
227 spin_lock_irq(&stopper1->lock);
228 spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
229
230 err = -ENOENT;
231 if (!stopper1->enabled || !stopper2->enabled)
232 goto unlock;
233
234 err = 0;
235 __cpu_stop_queue_work(stopper1, work1);
236 __cpu_stop_queue_work(stopper2, work2);
237unlock:
238 spin_unlock(&stopper2->lock);
239 spin_unlock_irq(&stopper1->lock);
240 lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
241
242 return err;
243}
216/** 244/**
217 * stop_two_cpus - stops two cpus 245 * stop_two_cpus - stops two cpus
218 * @cpu1: the cpu to stop 246 * @cpu1: the cpu to stop
@@ -247,24 +275,13 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
247 cpu_stop_init_done(&done, 2); 275 cpu_stop_init_done(&done, 2);
248 set_state(&msdata, MULTI_STOP_PREPARE); 276 set_state(&msdata, MULTI_STOP_PREPARE);
249 277
250 /* 278 if (cpu1 > cpu2)
251 * If we observe both CPUs active we know _cpu_down() cannot yet have 279 swap(cpu1, cpu2);
252 * queued its stop_machine works and therefore ours will get executed 280 if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
253 * first. Or its not either one of our CPUs that's getting unplugged,
254 * in which case we don't care.
255 *
256 * This relies on the stopper workqueues to be FIFO.
257 */
258 if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
259 preempt_enable(); 281 preempt_enable();
260 return -ENOENT; 282 return -ENOENT;
261 } 283 }
262 284
263 lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
264 cpu_stop_queue_work(cpu1, &work1);
265 cpu_stop_queue_work(cpu2, &work2);
266 lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
267
268 preempt_enable(); 285 preempt_enable();
269 286
270 wait_for_completion(&done.completion); 287 wait_for_completion(&done.completion);
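
The rewrite above queues both stopper works while holding both stopper locks, and sorts cpu1/cpu2 first so the two locks are always taken in the same order; that fixed ordering is what keeps the nested locking deadlock-free, and -ENOENT is returned if either stopper is offline. A userspace sketch of the acquire-in-a-fixed-order idea with pthread mutexes; everything here is made up and the two indices are assumed distinct.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stopper_lock[2] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static int stopper_enabled[2] = { 1, 1 };

/* Queue "work" on two per-CPU stoppers atomically with respect to each
 * other. Locks are always taken lowest index first, so two concurrent
 * callers can never deadlock, whichever pair they pass in. */
static int queue_two_works(int cpu1, int cpu2)
{
	int ret = -1;

	if (cpu1 > cpu2) {               /* enforce a global lock order */
		int tmp = cpu1; cpu1 = cpu2; cpu2 = tmp;
	}

	pthread_mutex_lock(&stopper_lock[cpu1]);
	pthread_mutex_lock(&stopper_lock[cpu2]);

	if (stopper_enabled[cpu1] && stopper_enabled[cpu2]) {
		printf("queued work on %d and %d\n", cpu1, cpu2);
		ret = 0;
	}

	pthread_mutex_unlock(&stopper_lock[cpu2]);
	pthread_mutex_unlock(&stopper_lock[cpu1]);
	return ret;
}

int main(void)
{
	queue_two_works(1, 0);                  /* same as (0, 1) after swap */
	stopper_enabled[1] = 0;
	printf("%d\n", queue_two_works(0, 1));  /* -1: one stopper offline */
	return 0;
}
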
@@ -452,6 +469,18 @@ repeat:
452 } 469 }
453} 470}
454 471
472void stop_machine_park(int cpu)
473{
474 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
475 /*
476 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
477 * the pending works before it parks, until then it is fine to queue
478 * the new works.
479 */
480 stopper->enabled = false;
481 kthread_park(stopper->thread);
482}
483
455extern void sched_set_stop_task(int cpu, struct task_struct *stop); 484extern void sched_set_stop_task(int cpu, struct task_struct *stop);
456 485
457static void cpu_stop_create(unsigned int cpu) 486static void cpu_stop_create(unsigned int cpu)
@@ -462,26 +491,16 @@ static void cpu_stop_create(unsigned int cpu)
462static void cpu_stop_park(unsigned int cpu) 491static void cpu_stop_park(unsigned int cpu)
463{ 492{
464 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); 493 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
465 struct cpu_stop_work *work, *tmp;
466 unsigned long flags;
467 494
468 /* drain remaining works */ 495 WARN_ON(!list_empty(&stopper->works));
469 spin_lock_irqsave(&stopper->lock, flags);
470 list_for_each_entry_safe(work, tmp, &stopper->works, list) {
471 list_del_init(&work->list);
472 cpu_stop_signal_done(work->done, false);
473 }
474 stopper->enabled = false;
475 spin_unlock_irqrestore(&stopper->lock, flags);
476} 496}
477 497
478static void cpu_stop_unpark(unsigned int cpu) 498void stop_machine_unpark(int cpu)
479{ 499{
480 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); 500 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
481 501
482 spin_lock_irq(&stopper->lock);
483 stopper->enabled = true; 502 stopper->enabled = true;
484 spin_unlock_irq(&stopper->lock); 503 kthread_unpark(stopper->thread);
485} 504}
486 505
487static struct smp_hotplug_thread cpu_stop_threads = { 506static struct smp_hotplug_thread cpu_stop_threads = {
@@ -490,9 +509,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
490 .thread_fn = cpu_stopper_thread, 509 .thread_fn = cpu_stopper_thread,
491 .thread_comm = "migration/%u", 510 .thread_comm = "migration/%u",
492 .create = cpu_stop_create, 511 .create = cpu_stop_create,
493 .setup = cpu_stop_unpark,
494 .park = cpu_stop_park, 512 .park = cpu_stop_park,
495 .pre_unpark = cpu_stop_unpark,
496 .selfparking = true, 513 .selfparking = true,
497}; 514};
498 515
@@ -508,6 +525,7 @@ static int __init cpu_stop_init(void)
508 } 525 }
509 526
510 BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads)); 527 BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
528 stop_machine_unpark(raw_smp_processor_id());
511 stop_machine_initialized = true; 529 stop_machine_initialized = true;
512 return 0; 530 return 0;
513} 531}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 3a38775b50c2..0d8fe8b8f727 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -479,7 +479,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
479 * return half the number of nanoseconds the hardware counter can technically 479 * return half the number of nanoseconds the hardware counter can technically
480 * cover. This is done so that we can potentially detect problems caused by 480 * cover. This is done so that we can potentially detect problems caused by
481 * delayed timers or bad hardware, which might result in time intervals that 481 * delayed timers or bad hardware, which might result in time intervals that
482 * are larger then what the math used can handle without overflows. 482 * are larger than what the math used can handle without overflows.
483 */ 483 */
484u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc) 484u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
485{ 485{
@@ -595,16 +595,15 @@ static void __clocksource_select(bool skipcur)
595 */ 595 */
596static void clocksource_select(void) 596static void clocksource_select(void)
597{ 597{
598 return __clocksource_select(false); 598 __clocksource_select(false);
599} 599}
600 600
601static void clocksource_select_fallback(void) 601static void clocksource_select_fallback(void)
602{ 602{
603 return __clocksource_select(true); 603 __clocksource_select(true);
604} 604}
605 605
606#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ 606#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
607
608static inline void clocksource_select(void) { } 607static inline void clocksource_select(void) { }
609static inline void clocksource_select_fallback(void) { } 608static inline void clocksource_select_fallback(void) { }
610 609
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 457a373e2181..435b8850dd80 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -59,7 +59,7 @@
59/* 59/*
60 * The timer bases: 60 * The timer bases:
61 * 61 *
62 * There are more clockids then hrtimer bases. Thus, we index 62 * There are more clockids than hrtimer bases. Thus, we index
63 * into the timer bases by the hrtimer_base_type enum. When trying 63 * into the timer bases by the hrtimer_base_type enum. When trying
64 * to reach a base using a clockid, hrtimer_clockid_to_base() 64 * to reach a base using a clockid, hrtimer_clockid_to_base()
65 * is used to convert from clockid to the proper hrtimer_base_type. 65 * is used to convert from clockid to the proper hrtimer_base_type.
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index df68cb875248..149cc8086aea 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -99,7 +99,7 @@ static time64_t ntp_next_leap_sec = TIME64_MAX;
99static int pps_valid; /* signal watchdog counter */ 99static int pps_valid; /* signal watchdog counter */
100static long pps_tf[3]; /* phase median filter */ 100static long pps_tf[3]; /* phase median filter */
101static long pps_jitter; /* current jitter (ns) */ 101static long pps_jitter; /* current jitter (ns) */
102static struct timespec pps_fbase; /* beginning of the last freq interval */ 102static struct timespec64 pps_fbase; /* beginning of the last freq interval */
103static int pps_shift; /* current interval duration (s) (shift) */ 103static int pps_shift; /* current interval duration (s) (shift) */
104static int pps_intcnt; /* interval counter */ 104static int pps_intcnt; /* interval counter */
105static s64 pps_freq; /* frequency offset (scaled ns/s) */ 105static s64 pps_freq; /* frequency offset (scaled ns/s) */
@@ -509,7 +509,7 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
509static void sync_cmos_clock(struct work_struct *work) 509static void sync_cmos_clock(struct work_struct *work)
510{ 510{
511 struct timespec64 now; 511 struct timespec64 now;
512 struct timespec next; 512 struct timespec64 next;
513 int fail = 1; 513 int fail = 1;
514 514
515 /* 515 /*
@@ -559,7 +559,7 @@ static void sync_cmos_clock(struct work_struct *work)
559 next.tv_nsec -= NSEC_PER_SEC; 559 next.tv_nsec -= NSEC_PER_SEC;
560 } 560 }
561 queue_delayed_work(system_power_efficient_wq, 561 queue_delayed_work(system_power_efficient_wq,
562 &sync_cmos_work, timespec_to_jiffies(&next)); 562 &sync_cmos_work, timespec64_to_jiffies(&next));
563} 563}
564 564
565void ntp_notify_cmos_timer(void) 565void ntp_notify_cmos_timer(void)
@@ -773,13 +773,13 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
773 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] 773 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
774 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */ 774 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
775struct pps_normtime { 775struct pps_normtime {
776 __kernel_time_t sec; /* seconds */ 776 s64 sec; /* seconds */
777 long nsec; /* nanoseconds */ 777 long nsec; /* nanoseconds */
778}; 778};
779 779
780/* normalize the timestamp so that nsec is in the 780/* normalize the timestamp so that nsec is in the
781 ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */ 781 ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
782static inline struct pps_normtime pps_normalize_ts(struct timespec ts) 782static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
783{ 783{
784 struct pps_normtime norm = { 784 struct pps_normtime norm = {
785 .sec = ts.tv_sec, 785 .sec = ts.tv_sec,
@@ -861,7 +861,7 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
861 pps_errcnt++; 861 pps_errcnt++;
862 pps_dec_freq_interval(); 862 pps_dec_freq_interval();
863 printk_deferred(KERN_ERR 863 printk_deferred(KERN_ERR
864 "hardpps: PPSERROR: interval too long - %ld s\n", 864 "hardpps: PPSERROR: interval too long - %lld s\n",
865 freq_norm.sec); 865 freq_norm.sec);
866 return 0; 866 return 0;
867 } 867 }
@@ -948,7 +948,7 @@ static void hardpps_update_phase(long error)
948 * This code is based on David Mills's reference nanokernel 948 * This code is based on David Mills's reference nanokernel
949 * implementation. It was mostly rewritten but keeps the same idea. 949 * implementation. It was mostly rewritten but keeps the same idea.
950 */ 950 */
951void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) 951void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
952{ 952{
953 struct pps_normtime pts_norm, freq_norm; 953 struct pps_normtime pts_norm, freq_norm;
954 954
@@ -969,7 +969,7 @@ void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
969 } 969 }
970 970
971 /* ok, now we have a base for frequency calculation */ 971 /* ok, now we have a base for frequency calculation */
972 freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase)); 972 freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));
973 973
974 /* check that the signal is in the range 974 /* check that the signal is in the range
975 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */ 975 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 65430504ca26..af924470eac0 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -9,5 +9,5 @@ extern ktime_t ntp_get_next_leap(void);
9extern int second_overflow(unsigned long secs); 9extern int second_overflow(unsigned long secs);
10extern int ntp_validate_timex(struct timex *); 10extern int ntp_validate_timex(struct timex *);
11extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); 11extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
12extern void __hardpps(const struct timespec *, const struct timespec *); 12extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
13#endif /* _LINUX_NTP_INTERNAL_H */ 13#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 892e3dae0aac..f5e86d282d52 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -249,7 +249,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
249 * but barriers are not required because update_gt_cputime() 249 * but barriers are not required because update_gt_cputime()
250 * can handle concurrent updates. 250 * can handle concurrent updates.
251 */ 251 */
252 WRITE_ONCE(cputimer->running, 1); 252 WRITE_ONCE(cputimer->running, true);
253 } 253 }
254 sample_cputime_atomic(times, &cputimer->cputime_atomic); 254 sample_cputime_atomic(times, &cputimer->cputime_atomic);
255} 255}
@@ -864,6 +864,13 @@ static void check_thread_timers(struct task_struct *tsk,
864 unsigned long long expires; 864 unsigned long long expires;
865 unsigned long soft; 865 unsigned long soft;
866 866
867 /*
868 * If cputime_expires is zero, then there are no active
869 * per thread CPU timers.
870 */
871 if (task_cputime_zero(&tsk->cputime_expires))
872 return;
873
867 expires = check_timers_list(timers, firing, prof_ticks(tsk)); 874 expires = check_timers_list(timers, firing, prof_ticks(tsk));
868 tsk_expires->prof_exp = expires_to_cputime(expires); 875 tsk_expires->prof_exp = expires_to_cputime(expires);
869 876
@@ -911,7 +918,7 @@ static inline void stop_process_timers(struct signal_struct *sig)
911 struct thread_group_cputimer *cputimer = &sig->cputimer; 918 struct thread_group_cputimer *cputimer = &sig->cputimer;
912 919
913 /* Turn off cputimer->running. This is done without locking. */ 920 /* Turn off cputimer->running. This is done without locking. */
914 WRITE_ONCE(cputimer->running, 0); 921 WRITE_ONCE(cputimer->running, false);
915} 922}
916 923
917static u32 onecputick; 924static u32 onecputick;
@@ -962,6 +969,19 @@ static void check_process_timers(struct task_struct *tsk,
962 unsigned long soft; 969 unsigned long soft;
963 970
964 /* 971 /*
972 * If cputimer is not running, then there are no active
973 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
974 */
975 if (!READ_ONCE(tsk->signal->cputimer.running))
976 return;
977
978 /*
979 * Signify that a thread is checking for process timers.
980 * Write access to this field is protected by the sighand lock.
981 */
982 sig->cputimer.checking_timer = true;
983
984 /*
965 * Collect the current process totals. 985 * Collect the current process totals.
966 */ 986 */
967 thread_group_cputimer(tsk, &cputime); 987 thread_group_cputimer(tsk, &cputime);
@@ -1015,6 +1035,8 @@ static void check_process_timers(struct task_struct *tsk,
1015 sig->cputime_expires.sched_exp = sched_expires; 1035 sig->cputime_expires.sched_exp = sched_expires;
1016 if (task_cputime_zero(&sig->cputime_expires)) 1036 if (task_cputime_zero(&sig->cputime_expires))
1017 stop_process_timers(sig); 1037 stop_process_timers(sig);
1038
1039 sig->cputimer.checking_timer = false;
1018} 1040}
1019 1041
1020/* 1042/*
@@ -1117,24 +1139,33 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
1117static inline int fastpath_timer_check(struct task_struct *tsk) 1139static inline int fastpath_timer_check(struct task_struct *tsk)
1118{ 1140{
1119 struct signal_struct *sig; 1141 struct signal_struct *sig;
1120 cputime_t utime, stime;
1121
1122 task_cputime(tsk, &utime, &stime);
1123 1142
1124 if (!task_cputime_zero(&tsk->cputime_expires)) { 1143 if (!task_cputime_zero(&tsk->cputime_expires)) {
1125 struct task_cputime task_sample = { 1144 struct task_cputime task_sample;
1126 .utime = utime,
1127 .stime = stime,
1128 .sum_exec_runtime = tsk->se.sum_exec_runtime
1129 };
1130 1145
1146 task_cputime(tsk, &task_sample.utime, &task_sample.stime);
1147 task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
1131 if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) 1148 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1132 return 1; 1149 return 1;
1133 } 1150 }
1134 1151
1135 sig = tsk->signal; 1152 sig = tsk->signal;
1136 /* Check if cputimer is running. This is accessed without locking. */ 1153 /*
1137 if (READ_ONCE(sig->cputimer.running)) { 1154 * Check if thread group timers expired when the cputimer is
1155 * running and no other thread in the group is already checking
1156 * for thread group cputimers. These fields are read without the
1157 * sighand lock. However, this is fine because this is meant to
1158 * be a fastpath heuristic to determine whether we should try to
1159 * acquire the sighand lock to check/handle timers.
1160 *
1161 * In the worst case scenario, if 'running' or 'checking_timer' gets
1162 * set but the current thread doesn't see the change yet, we'll wait
1163 * until the next thread in the group gets a scheduler interrupt to
1164 * handle the timer. This isn't an issue in practice because these
1165 * types of delays with signals actually getting sent are expected.
1166 */
1167 if (READ_ONCE(sig->cputimer.running) &&
1168 !READ_ONCE(sig->cputimer.checking_timer)) {
1138 struct task_cputime group_sample; 1169 struct task_cputime group_sample;
1139 1170
1140 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic); 1171 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
@@ -1174,12 +1205,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
1174 * put them on the firing list. 1205 * put them on the firing list.
1175 */ 1206 */
1176 check_thread_timers(tsk, &firing); 1207 check_thread_timers(tsk, &firing);
1177 /* 1208
1178 * If there are any active process wide timers (POSIX 1.b, itimers, 1209 check_process_timers(tsk, &firing);
1179 * RLIMIT_CPU) cputimer must be running.
1180 */
1181 if (READ_ONCE(tsk->signal->cputimer.running))
1182 check_process_timers(tsk, &firing);
1183 1210
1184 /* 1211 /*
1185 * We must release these locks before taking any timer's lock. 1212 * We must release these locks before taking any timer's lock.
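
The hunks above add a checking_timer flag so only one thread in the group takes the slow path at a time, and move the cheap "is anything armed at all" tests into check_thread_timers()/check_process_timers() themselves. The fastpath reads both flags without locks; a stale read merely postpones the work to the next tick. A rough lockless-hint sketch in portable C11 atomics, standing in for the kernel's accessors and locking:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool timers_running = false;  /* some group timer is armed */
static atomic_bool checking_timer = false;  /* someone is in the slow path */

/* Lockless hint only: a false negative just delays handling one tick. */
static bool should_take_slow_path(void)
{
	return atomic_load_explicit(&timers_running, memory_order_relaxed) &&
	       !atomic_load_explicit(&checking_timer, memory_order_relaxed);
}

static void tick(void)
{
	if (!should_take_slow_path())
		return;

	/* the real code sets this under the sighand lock */
	atomic_store(&checking_timer, true);
	printf("slow path: check and fire expired timers under the lock\n");
	atomic_store(&checking_timer, false);
}

int main(void)
{
	tick();                                  /* nothing armed: no output */
	atomic_store(&timers_running, true);
	tick();                                  /* takes the slow path once */
	return 0;
}
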
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index c7388dee8635..c48688904f9f 100644
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -39,7 +39,7 @@ define fmuls(b,n,d) {
39} 39}
40 40
41define timeconst(hz) { 41define timeconst(hz) {
42 print "/* Automatically generated by kernel/timeconst.bc */\n" 42 print "/* Automatically generated by kernel/time/timeconst.bc */\n"
43 print "/* Time conversion constants for HZ == ", hz, " */\n" 43 print "/* Time conversion constants for HZ == ", hz, " */\n"
44 print "\n" 44 print "\n"
45 45
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 44d2cc0436f4..b1356b7ae570 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -849,7 +849,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
849#ifdef CONFIG_NTP_PPS 849#ifdef CONFIG_NTP_PPS
850 850
851/** 851/**
852 * getnstime_raw_and_real - get day and raw monotonic time in timespec format 852 * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format
853 * @ts_raw: pointer to the timespec to be set to raw monotonic time 853 * @ts_raw: pointer to the timespec to be set to raw monotonic time
854 * @ts_real: pointer to the timespec to be set to the time of day 854 * @ts_real: pointer to the timespec to be set to the time of day
855 * 855 *
@@ -857,7 +857,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
857 * same time atomically and stores the resulting timestamps in timespec 857 * same time atomically and stores the resulting timestamps in timespec
858 * format. 858 * format.
859 */ 859 */
860void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) 860void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real)
861{ 861{
862 struct timekeeper *tk = &tk_core.timekeeper; 862 struct timekeeper *tk = &tk_core.timekeeper;
863 unsigned long seq; 863 unsigned long seq;
@@ -868,7 +868,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
868 do { 868 do {
869 seq = read_seqcount_begin(&tk_core.seq); 869 seq = read_seqcount_begin(&tk_core.seq);
870 870
871 *ts_raw = timespec64_to_timespec(tk->raw_time); 871 *ts_raw = tk->raw_time;
872 ts_real->tv_sec = tk->xtime_sec; 872 ts_real->tv_sec = tk->xtime_sec;
873 ts_real->tv_nsec = 0; 873 ts_real->tv_nsec = 0;
874 874
@@ -877,10 +877,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
877 877
878 } while (read_seqcount_retry(&tk_core.seq, seq)); 878 } while (read_seqcount_retry(&tk_core.seq, seq));
879 879
880 timespec_add_ns(ts_raw, nsecs_raw); 880 timespec64_add_ns(ts_raw, nsecs_raw);
881 timespec_add_ns(ts_real, nsecs_real); 881 timespec64_add_ns(ts_real, nsecs_real);
882} 882}
883EXPORT_SYMBOL(getnstime_raw_and_real); 883EXPORT_SYMBOL(ktime_get_raw_and_real_ts64);
884 884
885#endif /* CONFIG_NTP_PPS */ 885#endif /* CONFIG_NTP_PPS */
886 886
@@ -1674,7 +1674,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1674/** 1674/**
1675 * accumulate_nsecs_to_secs - Accumulates nsecs into secs 1675 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1676 * 1676 *
1677 * Helper function that accumulates a the nsecs greater then a second 1677 * Helper function that accumulates the nsecs greater than a second
1678 * from the xtime_nsec field to the xtime_secs field. 1678 * from the xtime_nsec field to the xtime_secs field.
1679 * It also calls into the NTP code to handle leapsecond processing. 1679 * It also calls into the NTP code to handle leapsecond processing.
1680 * 1680 *
@@ -1726,7 +1726,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1726 cycle_t interval = tk->cycle_interval << shift; 1726 cycle_t interval = tk->cycle_interval << shift;
1727 u64 raw_nsecs; 1727 u64 raw_nsecs;
1728 1728
1729 /* If the offset is smaller then a shifted interval, do nothing */ 1729 /* If the offset is smaller than a shifted interval, do nothing */
1730 if (offset < interval) 1730 if (offset < interval)
1731 return offset; 1731 return offset;
1732 1732
@@ -2025,7 +2025,7 @@ int do_adjtimex(struct timex *txc)
2025/** 2025/**
2026 * hardpps() - Accessor function to NTP __hardpps function 2026 * hardpps() - Accessor function to NTP __hardpps function
2027 */ 2027 */
2028void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) 2028void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
2029{ 2029{
2030 unsigned long flags; 2030 unsigned long flags;
2031 2031
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 84190f02b521..74591ba9474f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -461,10 +461,17 @@ void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
461 461
462static void timer_stats_account_timer(struct timer_list *timer) 462static void timer_stats_account_timer(struct timer_list *timer)
463{ 463{
464 if (likely(!timer->start_site)) 464 void *site;
465
466 /*
467 * start_site can be concurrently reset by
468 * timer_stats_timer_clear_start_info()
469 */
470 site = READ_ONCE(timer->start_site);
471 if (likely(!site))
465 return; 472 return;
466 473
467 timer_stats_update_stats(timer, timer->start_pid, timer->start_site, 474 timer_stats_update_stats(timer, timer->start_pid, site,
468 timer->function, timer->start_comm, 475 timer->function, timer->start_comm,
469 timer->flags); 476 timer->flags);
470} 477}
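
The fix above reads timer->start_site exactly once into a local, because timer_stats_timer_clear_start_info() can clear it concurrently; without the snapshot, the NULL test and the later use could observe different values. A userspace sketch of the snapshot-then-use pattern, with a C11 relaxed atomic load in place of READ_ONCE():

#include <stdatomic.h>
#include <stdio.h>

struct toy_timer {
	void *_Atomic start_site;   /* may be cleared by another thread */
	const char *comm;
};

static void account_timer(struct toy_timer *t)
{
	/* Snapshot once; every later use works on the same value. */
	void *site = atomic_load_explicit(&t->start_site,
					  memory_order_relaxed);

	if (!site)
		return;

	printf("timer started at %p by %s\n", site, t->comm);
}

int main(void)
{
	int marker;
	struct toy_timer t = { .comm = "example" };

	atomic_store(&t.start_site, (void *)&marker);
	account_timer(&t);                 /* prints the snapshot */
	atomic_store(&t.start_site, (void *)0);
	account_timer(&t);                 /* quietly returns */
	return 0;
}
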
@@ -867,7 +874,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
867 if (mask == 0) 874 if (mask == 0)
868 return expires; 875 return expires;
869 876
870 bit = find_last_bit(&mask, BITS_PER_LONG); 877 bit = __fls(mask);
871 878
872 mask = (1UL << bit) - 1; 879 mask = (1UL << bit) - 1;
873 880
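
find_last_bit() on a single word is just "index of the most significant set bit", so apply_slack() can use the cheaper __fls(). The surrounding math rounds the expiry up to a power-of-two boundary inside the allowed slack so nearby timers can fire together. A standalone, simplified sketch of both pieces; my_fls() stands in for the kernel helper and the slack handling here is reduced to the happy path:

#include <stdio.h>

/* 0-based index of the most significant set bit of a nonzero word. */
static unsigned int my_fls(unsigned long x)
{
	return (unsigned int)(sizeof(unsigned long) * 8 - 1) -
	       (unsigned int)__builtin_clzl(x);
}

static unsigned long apply_slack_sketch(unsigned long expires,
					unsigned long slack)
{
	unsigned long limit = expires + slack;
	unsigned long mask = expires ^ limit;

	if (!mask)
		return expires;

	mask = (1UL << my_fls(mask)) - 1;
	return limit & ~mask;        /* rounded expiry within the slack */
}

int main(void)
{
	printf("%u\n", my_fls(0x90));                   /* 7 */
	printf("%lu\n", apply_slack_sketch(1000, 50));  /* 1024 */
	return 0;
}
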
diff --git a/kernel/torture.c b/kernel/torture.c
index 3e4840633d3e..44aa462d033f 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -523,6 +523,7 @@ static int stutter;
523 */ 523 */
524void stutter_wait(const char *title) 524void stutter_wait(const char *title)
525{ 525{
526 cond_resched_rcu_qs();
526 while (READ_ONCE(stutter_pause_test) || 527 while (READ_ONCE(stutter_pause_test) ||
527 (torture_runnable && !READ_ONCE(*torture_runnable))) { 528 (torture_runnable && !READ_ONCE(*torture_runnable))) {
528 if (stutter_pause_test) 529 if (stutter_pause_test)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b0623ac785a2..00611e95a8ee 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5697,7 +5697,7 @@ free:
5697} 5697}
5698 5698
5699static void 5699static void
5700ftrace_graph_probe_sched_switch(void *ignore, 5700ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
5701 struct task_struct *prev, struct task_struct *next) 5701 struct task_struct *prev, struct task_struct *next)
5702{ 5702{
5703 unsigned long long timestamp; 5703 unsigned long long timestamp;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index f270088e9929..4c896a0101bd 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,7 +16,8 @@ static int sched_ref;
16static DEFINE_MUTEX(sched_register_mutex); 16static DEFINE_MUTEX(sched_register_mutex);
17 17
18static void 18static void
19probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) 19probe_sched_switch(void *ignore, bool preempt,
20 struct task_struct *prev, struct task_struct *next)
20{ 21{
21 if (unlikely(!sched_ref)) 22 if (unlikely(!sched_ref))
22 return; 23 return;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 12cbe77b4136..4bcfbac289ff 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -420,7 +420,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
420} 420}
421 421
422static void notrace 422static void notrace
423probe_wakeup_sched_switch(void *ignore, 423probe_wakeup_sched_switch(void *ignore, bool preempt,
424 struct task_struct *prev, struct task_struct *next) 424 struct task_struct *prev, struct task_struct *next)
425{ 425{
426 struct trace_array_cpu *data; 426 struct trace_array_cpu *data;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index b746399ab59c..8abf1ba18085 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -85,9 +85,19 @@ check_stack(unsigned long ip, unsigned long *stack)
85 if (!object_is_on_stack(stack)) 85 if (!object_is_on_stack(stack))
86 return; 86 return;
87 87
88 /* Can't do this from NMI context (can cause deadlocks) */
89 if (in_nmi())
90 return;
91
88 local_irq_save(flags); 92 local_irq_save(flags);
89 arch_spin_lock(&max_stack_lock); 93 arch_spin_lock(&max_stack_lock);
90 94
95 /*
96 * RCU may not be watching, make it see us.
97 * The stack trace code uses rcu_sched.
98 */
99 rcu_irq_enter();
100
91 /* In case another CPU set the tracer_frame on us */ 101 /* In case another CPU set the tracer_frame on us */
92 if (unlikely(!frame_size)) 102 if (unlikely(!frame_size))
93 this_size -= tracer_frame; 103 this_size -= tracer_frame;
@@ -169,6 +179,7 @@ check_stack(unsigned long ip, unsigned long *stack)
169 } 179 }
170 180
171 out: 181 out:
182 rcu_irq_exit();
172 arch_spin_unlock(&max_stack_lock); 183 arch_spin_unlock(&max_stack_lock);
173 local_irq_restore(flags); 184 local_irq_restore(flags);
174} 185}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ab76b99adc85..1d1521c26302 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK
197config FRAME_WARN 197config FRAME_WARN
198 int "Warn for stack frames larger than (needs gcc 4.4)" 198 int "Warn for stack frames larger than (needs gcc 4.4)"
199 range 0 8192 199 range 0 8192
200 default 0 if KASAN
200 default 1024 if !64BIT 201 default 1024 if !64BIT
201 default 2048 if 64BIT 202 default 2048 if 64BIT
202 help 203 help
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f1cdeb024d17..6a823a53e357 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr)
44 printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" 44 printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
45 "name %pd, interval %lu, probability %lu, " 45 "name %pd, interval %lu, probability %lu, "
46 "space %d, times %d\n", attr->dname, 46 "space %d, times %d\n", attr->dname,
47 attr->probability, attr->interval, 47 attr->interval, attr->probability,
48 atomic_read(&attr->space), 48 atomic_read(&attr->space),
49 atomic_read(&attr->times)); 49 atomic_read(&attr->times));
50 if (attr->verbose > 1) 50 if (attr->verbose > 1)
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 88d3d32e5923..6019c53c669e 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
43 printk("%.*s", (end - start) + 1, buf); 43 printk("%.*s", (end - start) + 1, buf);
44} 44}
45 45
46/*
47 * When raise() is called it will be is passed a pointer to the
48 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
49 * directly from their raise() functions may rely on the mask
50 * they are passed being updated as a side effect of this call.
51 */
46void nmi_trigger_all_cpu_backtrace(bool include_self, 52void nmi_trigger_all_cpu_backtrace(bool include_self,
47 void (*raise)(cpumask_t *mask)) 53 void (*raise)(cpumask_t *mask))
48{ 54{
@@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
149 /* Replace printk to write into the NMI seq */ 155 /* Replace printk to write into the NMI seq */
150 this_cpu_write(printk_func, nmi_vprintk); 156 this_cpu_write(printk_func, nmi_vprintk);
151 pr_warn("NMI backtrace for cpu %d\n", cpu); 157 pr_warn("NMI backtrace for cpu %d\n", cpu);
152 show_regs(regs); 158 if (regs)
159 show_regs(regs);
160 else
161 dump_stack();
153 this_cpu_write(printk_func, printk_func_save); 162 this_cpu_write(printk_func, printk_func_save);
154 163
155 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 164 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 2df8ddcb0ca0..619984fc07ec 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -480,6 +480,10 @@ static void cgwb_release_workfn(struct work_struct *work)
480 release_work); 480 release_work);
481 struct backing_dev_info *bdi = wb->bdi; 481 struct backing_dev_info *bdi = wb->bdi;
482 482
483 spin_lock_irq(&cgwb_lock);
484 list_del_rcu(&wb->bdi_node);
485 spin_unlock_irq(&cgwb_lock);
486
483 wb_shutdown(wb); 487 wb_shutdown(wb);
484 488
485 css_put(wb->memcg_css); 489 css_put(wb->memcg_css);
@@ -575,6 +579,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
575 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); 579 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
576 if (!ret) { 580 if (!ret) {
577 atomic_inc(&bdi->usage_cnt); 581 atomic_inc(&bdi->usage_cnt);
582 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
578 list_add(&wb->memcg_node, memcg_cgwb_list); 583 list_add(&wb->memcg_node, memcg_cgwb_list);
579 list_add(&wb->blkcg_node, blkcg_cgwb_list); 584 list_add(&wb->blkcg_node, blkcg_cgwb_list);
580 css_get(memcg_css); 585 css_get(memcg_css);
@@ -676,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
676static void cgwb_bdi_destroy(struct backing_dev_info *bdi) 681static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
677{ 682{
678 struct radix_tree_iter iter; 683 struct radix_tree_iter iter;
679 struct bdi_writeback_congested *congested, *congested_n; 684 struct rb_node *rbn;
680 void **slot; 685 void **slot;
681 686
682 WARN_ON(test_bit(WB_registered, &bdi->wb.state)); 687 WARN_ON(test_bit(WB_registered, &bdi->wb.state));
@@ -686,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
686 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) 691 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
687 cgwb_kill(*slot); 692 cgwb_kill(*slot);
688 693
689 rbtree_postorder_for_each_entry_safe(congested, congested_n, 694 while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
690 &bdi->cgwb_congested_tree, rb_node) { 695 struct bdi_writeback_congested *congested =
691 rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree); 696 rb_entry(rbn, struct bdi_writeback_congested, rb_node);
697
698 rb_erase(rbn, &bdi->cgwb_congested_tree);
692 congested->bdi = NULL; /* mark @congested unlinked */ 699 congested->bdi = NULL; /* mark @congested unlinked */
693 } 700 }
694 701
@@ -764,15 +771,22 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
764 771
765int bdi_init(struct backing_dev_info *bdi) 772int bdi_init(struct backing_dev_info *bdi)
766{ 773{
774 int ret;
775
767 bdi->dev = NULL; 776 bdi->dev = NULL;
768 777
769 bdi->min_ratio = 0; 778 bdi->min_ratio = 0;
770 bdi->max_ratio = 100; 779 bdi->max_ratio = 100;
771 bdi->max_prop_frac = FPROP_FRAC_BASE; 780 bdi->max_prop_frac = FPROP_FRAC_BASE;
772 INIT_LIST_HEAD(&bdi->bdi_list); 781 INIT_LIST_HEAD(&bdi->bdi_list);
782 INIT_LIST_HEAD(&bdi->wb_list);
773 init_waitqueue_head(&bdi->wb_waitq); 783 init_waitqueue_head(&bdi->wb_waitq);
774 784
775 return cgwb_bdi_init(bdi); 785 ret = cgwb_bdi_init(bdi);
786
787 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
788
789 return ret;
776} 790}
777EXPORT_SYMBOL(bdi_init); 791EXPORT_SYMBOL(bdi_init);
778 792
@@ -823,7 +837,7 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
823 synchronize_rcu_expedited(); 837 synchronize_rcu_expedited();
824} 838}
825 839
826void bdi_destroy(struct backing_dev_info *bdi) 840void bdi_unregister(struct backing_dev_info *bdi)
827{ 841{
828 /* make sure nobody finds us on the bdi_list anymore */ 842 /* make sure nobody finds us on the bdi_list anymore */
829 bdi_remove_from_list(bdi); 843 bdi_remove_from_list(bdi);
@@ -835,9 +849,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
835 device_unregister(bdi->dev); 849 device_unregister(bdi->dev);
836 bdi->dev = NULL; 850 bdi->dev = NULL;
837 } 851 }
852}
838 853
854void bdi_exit(struct backing_dev_info *bdi)
855{
856 WARN_ON_ONCE(bdi->dev);
839 wb_exit(&bdi->wb); 857 wb_exit(&bdi->wb);
840} 858}
859
860void bdi_destroy(struct backing_dev_info *bdi)
861{
862 bdi_unregister(bdi);
863 bdi_exit(bdi);
864}
841EXPORT_SYMBOL(bdi_destroy); 865EXPORT_SYMBOL(bdi_destroy);
842 866
843/* 867/*
diff --git a/mm/cma.c b/mm/cma.c
index e7d1db533025..4eb56badf37e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -361,7 +361,7 @@ err:
361 * This function allocates part of contiguous memory on specific 361 * This function allocates part of contiguous memory on specific
362 * contiguous memory area. 362 * contiguous memory area.
363 */ 363 */
364struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) 364struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
365{ 365{
366 unsigned long mask, offset, pfn, start = 0; 366 unsigned long mask, offset, pfn, start = 0;
367 unsigned long bitmap_maxno, bitmap_no, bitmap_count; 367 unsigned long bitmap_maxno, bitmap_no, bitmap_count;
@@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
371 if (!cma || !cma->count) 371 if (!cma || !cma->count)
372 return NULL; 372 return NULL;
373 373
374 pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, 374 pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
375 count, align); 375 count, align);
376 376
377 if (!count) 377 if (!count)
diff --git a/mm/filemap.c b/mm/filemap.c
index 1cc5467cf36c..327910c2400c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2488,6 +2488,11 @@ again:
2488 break; 2488 break;
2489 } 2489 }
2490 2490
2491 if (fatal_signal_pending(current)) {
2492 status = -EINTR;
2493 break;
2494 }
2495
2491 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2496 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2492 &page, &fsdata); 2497 &page, &fsdata);
2493 if (unlikely(status < 0)) 2498 if (unlikely(status < 0))
@@ -2525,10 +2530,6 @@ again:
2525 written += copied; 2530 written += copied;
2526 2531
2527 balance_dirty_pages_ratelimited(mapping); 2532 balance_dirty_pages_ratelimited(mapping);
2528 if (fatal_signal_pending(current)) {
2529 status = -EINTR;
2530 break;
2531 }
2532 } while (iov_iter_count(i)); 2533 } while (iov_iter_count(i));
2533 2534
2534 return written ? written : status; 2535 return written ? written : status;
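
The hunk above moves the fatal_signal_pending() test to the top of the copy loop, before write_begin(), so a task that has been killed bails out even on an iteration that makes no forward progress, instead of retrying indefinitely. The general shape is "check for cancellation at the top of every retry iteration"; a small sketch with an invented cancel flag, not the page-cache code itself:

#include <stdbool.h>
#include <stdio.h>

static bool fatal_signal;       /* stand-in for fatal_signal_pending() */

/* Copy 'total' bytes in chunks. In the real code an iteration can make
 * zero progress (short copy, failed write_begin), which is why the
 * cancellation check must come before the work, not after it. */
static long write_loop(long total)
{
	long written = 0;

	while (total > 0) {
		if (fatal_signal)
			return written ? written : -1;  /* -EINTR in spirit */

		long chunk = total > 4096 ? 4096 : total;
		/* ... prepare page, copy data, commit ... */
		written += chunk;
		total -= chunk;
	}
	return written;
}

int main(void)
{
	printf("%ld\n", write_loop(10000));   /* 10000 */
	fatal_signal = true;
	printf("%ld\n", write_loop(10000));   /* -1: bailed out immediately */
	return 0;
}
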
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b06b8db9df2..3fd0311c3ba7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
1880 * here). But it is generally safer to never allow 1880 * here). But it is generally safer to never allow
1881 * small and huge TLB entries for the same virtual 1881 * small and huge TLB entries for the same virtual
1882 * address to be loaded simultaneously. So instead of 1882 * address to be loaded simultaneously. So instead of
1883 * doing "pmd_populate(); flush_tlb_range();" we first 1883 * doing "pmd_populate(); flush_pmd_tlb_range();" we first
1884 * mark the current pmd notpresent (atomically because 1884 * mark the current pmd notpresent (atomically because
1885 * here the pmd_trans_huge and pmd_trans_splitting 1885 * here the pmd_trans_huge and pmd_trans_splitting
1886 * must remain set at all times on the pmd until the 1886 * must remain set at all times on the pmd until the
@@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2206 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2206 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2207 _pte++, address += PAGE_SIZE) { 2207 _pte++, address += PAGE_SIZE) {
2208 pte_t pteval = *_pte; 2208 pte_t pteval = *_pte;
2209 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 2209 if (pte_none(pteval) || (pte_present(pteval) &&
2210 is_zero_pfn(pte_pfn(pteval)))) {
2210 if (!userfaultfd_armed(vma) && 2211 if (!userfaultfd_armed(vma) &&
2211 ++none_or_zero <= khugepaged_max_ptes_none) 2212 ++none_or_zero <= khugepaged_max_ptes_none)
2212 continue; 2213 continue;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d9b5c817dce8..c57c4423c688 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3741,44 +3741,43 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3741/** 3741/**
3742 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3742 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3743 * @wb: bdi_writeback in question 3743 * @wb: bdi_writeback in question
3744 * @pavail: out parameter for number of available pages 3744 * @pfilepages: out parameter for number of file pages
3745 * @pheadroom: out parameter for number of allocatable pages according to memcg
3745 * @pdirty: out parameter for number of dirty pages 3746 * @pdirty: out parameter for number of dirty pages
3746 * @pwriteback: out parameter for number of pages under writeback 3747 * @pwriteback: out parameter for number of pages under writeback
3747 * 3748 *
3748 * Determine the numbers of available, dirty, and writeback pages in @wb's 3749 * Determine the numbers of file, headroom, dirty, and writeback pages in
3749 * memcg. Dirty and writeback are self-explanatory. Available is a bit 3750 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3750 * more involved. 3751 * is a bit more involved.
3751 * 3752 *
3752 * A memcg's headroom is "min(max, high) - used". The available memory is 3753 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3753 * calculated as the lowest headroom of itself and the ancestors plus the 3754 * headroom is calculated as the lowest headroom of itself and the
3754 * number of pages already being used for file pages. Note that this 3755 * ancestors. Note that this doesn't consider the actual amount of
3755 * doesn't consider the actual amount of available memory in the system. 3756 * available memory in the system. The caller should further cap
3756 * The caller should further cap *@pavail accordingly. 3757 * *@pheadroom accordingly.
3757 */ 3758 */
3758void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, 3759void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3759 unsigned long *pdirty, unsigned long *pwriteback) 3760 unsigned long *pheadroom, unsigned long *pdirty,
3761 unsigned long *pwriteback)
3760{ 3762{
3761 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3763 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3762 struct mem_cgroup *parent; 3764 struct mem_cgroup *parent;
3763 unsigned long head_room = PAGE_COUNTER_MAX;
3764 unsigned long file_pages;
3765 3765
3766 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3766 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3767 3767
3768 /* this should eventually include NR_UNSTABLE_NFS */ 3768 /* this should eventually include NR_UNSTABLE_NFS */
3769 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3769 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3770 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3771 (1 << LRU_ACTIVE_FILE));
3772 *pheadroom = PAGE_COUNTER_MAX;
3770 3773
3771 file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3772 (1 << LRU_ACTIVE_FILE));
3773 while ((parent = parent_mem_cgroup(memcg))) { 3774 while ((parent = parent_mem_cgroup(memcg))) {
3774 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3775 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3775 unsigned long used = page_counter_read(&memcg->memory); 3776 unsigned long used = page_counter_read(&memcg->memory);
3776 3777
3777 head_room = min(head_room, ceiling - min(ceiling, used)); 3778 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3778 memcg = parent; 3779 memcg = parent;
3779 } 3780 }
3780
3781 *pavail = file_pages + head_room;
3782} 3781}
3783 3782
3784#else /* CONFIG_CGROUP_WRITEBACK */ 3783#else /* CONFIG_CGROUP_WRITEBACK */
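The mem_cgroup_wb_stats() documentation above defines headroom as "min(max, high) - used", minimized over the memcg and its ancestors. A standalone sketch of that arithmetic with made-up page counts (PAGE_COUNTER_MAX replaced by ~0UL):

#include <stdio.h>

struct memcg { unsigned long limit, high, used; };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	struct memcg chain[] = {	/* memcg first, then its ancestors */
		{ .limit = 1000, .high =  800, .used =  700 },
		{ .limit = 4000, .high = 4000, .used = 3950 },
		{ .limit = 8000, .high = 6000, .used = 2000 },
	};
	unsigned long headroom = ~0UL;	/* PAGE_COUNTER_MAX stand-in */

	for (unsigned int i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
		unsigned long ceiling = min_ul(chain[i].limit, chain[i].high);
		unsigned long used = chain[i].used;

		/* ceiling - min(ceiling, used) clamps negative headroom to 0 */
		headroom = min_ul(headroom, ceiling - min_ul(ceiling, used));
	}
	printf("headroom = %lu pages\n", headroom);	/* 50 in this example */
	return 0;
}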
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0a931cdd4f6b..2c90357c34ea 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -145,9 +145,6 @@ struct dirty_throttle_control {
145 unsigned long pos_ratio; 145 unsigned long pos_ratio;
146}; 146};
147 147
148#define DTC_INIT_COMMON(__wb) .wb = (__wb), \
149 .wb_completions = &(__wb)->completions
150
151/* 148/*
152 * Length of period for aging writeout fractions of bdis. This is an 149 * Length of period for aging writeout fractions of bdis. This is an
153 * arbitrarily chosen number. The longer the period, the slower fractions will 150 * arbitrarily chosen number. The longer the period, the slower fractions will
@@ -157,12 +154,16 @@ struct dirty_throttle_control {
157 154
158#ifdef CONFIG_CGROUP_WRITEBACK 155#ifdef CONFIG_CGROUP_WRITEBACK
159 156
160#define GDTC_INIT(__wb) .dom = &global_wb_domain, \ 157#define GDTC_INIT(__wb) .wb = (__wb), \
161 DTC_INIT_COMMON(__wb) 158 .dom = &global_wb_domain, \
159 .wb_completions = &(__wb)->completions
160
162#define GDTC_INIT_NO_WB .dom = &global_wb_domain 161#define GDTC_INIT_NO_WB .dom = &global_wb_domain
163#define MDTC_INIT(__wb, __gdtc) .dom = mem_cgroup_wb_domain(__wb), \ 162
164 .gdtc = __gdtc, \ 163#define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
165 DTC_INIT_COMMON(__wb) 164 .dom = mem_cgroup_wb_domain(__wb), \
165 .wb_completions = &(__wb)->memcg_completions, \
166 .gdtc = __gdtc
166 167
167static bool mdtc_valid(struct dirty_throttle_control *dtc) 168static bool mdtc_valid(struct dirty_throttle_control *dtc)
168{ 169{
@@ -213,7 +214,8 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
213 214
214#else /* CONFIG_CGROUP_WRITEBACK */ 215#else /* CONFIG_CGROUP_WRITEBACK */
215 216
216#define GDTC_INIT(__wb) DTC_INIT_COMMON(__wb) 217#define GDTC_INIT(__wb) .wb = (__wb), \
218 .wb_completions = &(__wb)->completions
217#define GDTC_INIT_NO_WB 219#define GDTC_INIT_NO_WB
218#define MDTC_INIT(__wb, __gdtc) 220#define MDTC_INIT(__wb, __gdtc)
219 221
@@ -682,13 +684,19 @@ static unsigned long hard_dirty_limit(struct wb_domain *dom,
682 return max(thresh, dom->dirty_limit); 684 return max(thresh, dom->dirty_limit);
683} 685}
684 686
685/* memory available to a memcg domain is capped by system-wide clean memory */ 687/*
686static void mdtc_cap_avail(struct dirty_throttle_control *mdtc) 688 * Memory which can be further allocated to a memcg domain is capped by
689 * system-wide clean memory excluding the amount being used in the domain.
690 */
691static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
692 unsigned long filepages, unsigned long headroom)
687{ 693{
688 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); 694 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
689 unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); 695 unsigned long clean = filepages - min(filepages, mdtc->dirty);
696 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
697 unsigned long other_clean = global_clean - min(global_clean, clean);
690 698
691 mdtc->avail = min(mdtc->avail, clean); 699 mdtc->avail = filepages + min(headroom, other_clean);
692} 700}
693 701
694/** 702/**
@@ -1562,16 +1570,16 @@ static void balance_dirty_pages(struct address_space *mapping,
1562 } 1570 }
1563 1571
1564 if (mdtc) { 1572 if (mdtc) {
1565 unsigned long writeback; 1573 unsigned long filepages, headroom, writeback;
1566 1574
1567 /* 1575 /*
1568 * If @wb belongs to !root memcg, repeat the same 1576 * If @wb belongs to !root memcg, repeat the same
1569 * basic calculations for the memcg domain. 1577 * basic calculations for the memcg domain.
1570 */ 1578 */
1571 mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, 1579 mem_cgroup_wb_stats(wb, &filepages, &headroom,
1572 &writeback); 1580 &mdtc->dirty, &writeback);
1573 mdtc_cap_avail(mdtc);
1574 mdtc->dirty += writeback; 1581 mdtc->dirty += writeback;
1582 mdtc_calc_avail(mdtc, filepages, headroom);
1575 1583
1576 domain_dirty_limits(mdtc); 1584 domain_dirty_limits(mdtc);
1577 1585
@@ -1893,10 +1901,11 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
1893 return true; 1901 return true;
1894 1902
1895 if (mdtc) { 1903 if (mdtc) {
1896 unsigned long writeback; 1904 unsigned long filepages, headroom, writeback;
1897 1905
1898 mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback); 1906 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
1899 mdtc_cap_avail(mdtc); 1907 &writeback);
1908 mdtc_calc_avail(mdtc, filepages, headroom);
1900 domain_dirty_limits(mdtc); /* ditto, ignore writeback */ 1909 domain_dirty_limits(mdtc); /* ditto, ignore writeback */
1901 1910
1902 if (mdtc->dirty > mdtc->bg_thresh) 1911 if (mdtc->dirty > mdtc->bg_thresh)
@@ -1956,7 +1965,6 @@ void laptop_mode_timer_fn(unsigned long data)
1956 int nr_pages = global_page_state(NR_FILE_DIRTY) + 1965 int nr_pages = global_page_state(NR_FILE_DIRTY) +
1957 global_page_state(NR_UNSTABLE_NFS); 1966 global_page_state(NR_UNSTABLE_NFS);
1958 struct bdi_writeback *wb; 1967 struct bdi_writeback *wb;
1959 struct wb_iter iter;
1960 1968
1961 /* 1969 /*
1962 * We want to write everything out, not just down to the dirty 1970 * We want to write everything out, not just down to the dirty
@@ -1965,10 +1973,12 @@ void laptop_mode_timer_fn(unsigned long data)
1965 if (!bdi_has_dirty_io(&q->backing_dev_info)) 1973 if (!bdi_has_dirty_io(&q->backing_dev_info))
1966 return; 1974 return;
1967 1975
1968 bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0) 1976 rcu_read_lock();
1977 list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
1969 if (wb_has_dirty_io(wb)) 1978 if (wb_has_dirty_io(wb))
1970 wb_start_writeback(wb, nr_pages, true, 1979 wb_start_writeback(wb, nr_pages, true,
1971 WB_REASON_LAPTOP_TIMER); 1980 WB_REASON_LAPTOP_TIMER);
1981 rcu_read_unlock();
1972} 1982}
1973 1983
1974/* 1984/*
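The new mdtc_calc_avail() above computes a memcg domain's dirtyable memory as its own file pages plus whatever clean memory the rest of the system could still give it, capped by the memcg's headroom. A userspace sketch of the same arithmetic with invented page counts:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long gdtc_avail = 100000, gdtc_dirty = 20000;	/* global domain */
	unsigned long filepages = 5000, headroom = 3000, mdtc_dirty = 1000;

	unsigned long clean        = filepages - min_ul(filepages, mdtc_dirty);
	unsigned long global_clean = gdtc_avail - min_ul(gdtc_avail, gdtc_dirty);
	unsigned long other_clean  = global_clean - min_ul(global_clean, clean);
	unsigned long avail        = filepages + min_ul(headroom, other_clean);

	printf("mdtc->avail = %lu pages\n", avail);	/* 8000 in this example */
	return 0;
}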
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 6b674e00153c..7d3db0247983 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -57,35 +57,59 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
57} 57}
58#endif 58#endif
59 59
60#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
61int ptep_clear_flush_young(struct vm_area_struct *vma,
62 unsigned long address, pte_t *ptep)
63{
64 int young;
65 young = ptep_test_and_clear_young(vma, address, ptep);
66 if (young)
67 flush_tlb_page(vma, address);
68 return young;
69}
70#endif
71
72#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
73pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
74 pte_t *ptep)
75{
76 struct mm_struct *mm = (vma)->vm_mm;
77 pte_t pte;
78 pte = ptep_get_and_clear(mm, address, ptep);
79 if (pte_accessible(mm, pte))
80 flush_tlb_page(vma, address);
81 return pte;
82}
83#endif
84
85#ifdef CONFIG_TRANSPARENT_HUGEPAGE
86
87#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
88
89/*
90 * ARCHes with special requirements for evicting THP backing TLB entries can
 91 * implement this. Even otherwise, it can help optimize the normal TLB flush
 92 * in the THP regime. The stock flush_tlb_range() typically has an optimization
 93 * to nuke the entire TLB if the flush span is greater than a threshold, which
 94 * will likely be true for a single huge page. Thus a single THP flush will
 95 * invalidate the entire TLB, which is not desirable.
96 * e.g. see arch/arc: flush_pmd_tlb_range
97 */
98#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
99#endif
100
60#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 101#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
61int pmdp_set_access_flags(struct vm_area_struct *vma, 102int pmdp_set_access_flags(struct vm_area_struct *vma,
62 unsigned long address, pmd_t *pmdp, 103 unsigned long address, pmd_t *pmdp,
63 pmd_t entry, int dirty) 104 pmd_t entry, int dirty)
64{ 105{
65#ifdef CONFIG_TRANSPARENT_HUGEPAGE
66 int changed = !pmd_same(*pmdp, entry); 106 int changed = !pmd_same(*pmdp, entry);
67 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 107 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
68 if (changed) { 108 if (changed) {
69 set_pmd_at(vma->vm_mm, address, pmdp, entry); 109 set_pmd_at(vma->vm_mm, address, pmdp, entry);
70 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 110 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
71 } 111 }
72 return changed; 112 return changed;
73#else /* CONFIG_TRANSPARENT_HUGEPAGE */
74 BUG();
75 return 0;
76#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
77}
78#endif
79
80#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
81int ptep_clear_flush_young(struct vm_area_struct *vma,
82 unsigned long address, pte_t *ptep)
83{
84 int young;
85 young = ptep_test_and_clear_young(vma, address, ptep);
86 if (young)
87 flush_tlb_page(vma, address);
88 return young;
89} 113}
90#endif 114#endif
91 115
@@ -94,33 +118,15 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
94 unsigned long address, pmd_t *pmdp) 118 unsigned long address, pmd_t *pmdp)
95{ 119{
96 int young; 120 int young;
97#ifdef CONFIG_TRANSPARENT_HUGEPAGE
98 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 121 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
99#else
100 BUG();
101#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
102 young = pmdp_test_and_clear_young(vma, address, pmdp); 122 young = pmdp_test_and_clear_young(vma, address, pmdp);
103 if (young) 123 if (young)
104 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 124 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
105 return young; 125 return young;
106} 126}
107#endif 127#endif
108 128
109#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
110pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
111 pte_t *ptep)
112{
113 struct mm_struct *mm = (vma)->vm_mm;
114 pte_t pte;
115 pte = ptep_get_and_clear(mm, address, ptep);
116 if (pte_accessible(mm, pte))
117 flush_tlb_page(vma, address);
118 return pte;
119}
120#endif
121
122#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH 129#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
123#ifdef CONFIG_TRANSPARENT_HUGEPAGE
124pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, 130pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
125 pmd_t *pmdp) 131 pmd_t *pmdp)
126{ 132{
@@ -128,14 +134,12 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
128 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 134 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
129 VM_BUG_ON(!pmd_trans_huge(*pmdp)); 135 VM_BUG_ON(!pmd_trans_huge(*pmdp));
130 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 136 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
131 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 137 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
132 return pmd; 138 return pmd;
133} 139}
134#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
135#endif 140#endif
136 141
137#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH 142#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
138#ifdef CONFIG_TRANSPARENT_HUGEPAGE
139void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, 143void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
140 pmd_t *pmdp) 144 pmd_t *pmdp)
141{ 145{
@@ -143,13 +147,11 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
143 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 147 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
144 set_pmd_at(vma->vm_mm, address, pmdp, pmd); 148 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
145 /* tlb flush only to serialize against gup-fast */ 149 /* tlb flush only to serialize against gup-fast */
146 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 150 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
147} 151}
148#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
149#endif 152#endif
150 153
151#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT 154#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
152#ifdef CONFIG_TRANSPARENT_HUGEPAGE
153void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 155void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
154 pgtable_t pgtable) 156 pgtable_t pgtable)
155{ 157{
@@ -162,11 +164,9 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
162 list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru); 164 list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
163 pmd_huge_pte(mm, pmdp) = pgtable; 165 pmd_huge_pte(mm, pmdp) = pgtable;
164} 166}
165#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
166#endif 167#endif
167 168
168#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW 169#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
169#ifdef CONFIG_TRANSPARENT_HUGEPAGE
170/* no "address" argument so destroys page coloring of some arch */ 170/* no "address" argument so destroys page coloring of some arch */
171pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) 171pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
172{ 172{
@@ -185,23 +185,19 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
185 } 185 }
186 return pgtable; 186 return pgtable;
187} 187}
188#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
189#endif 188#endif
190 189
191#ifndef __HAVE_ARCH_PMDP_INVALIDATE 190#ifndef __HAVE_ARCH_PMDP_INVALIDATE
192#ifdef CONFIG_TRANSPARENT_HUGEPAGE
193void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 191void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
194 pmd_t *pmdp) 192 pmd_t *pmdp)
195{ 193{
196 pmd_t entry = *pmdp; 194 pmd_t entry = *pmdp;
197 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); 195 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
198 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 196 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
199} 197}
200#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
201#endif 198#endif
202 199
203#ifndef pmdp_collapse_flush 200#ifndef pmdp_collapse_flush
204#ifdef CONFIG_TRANSPARENT_HUGEPAGE
205pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, 201pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
206 pmd_t *pmdp) 202 pmd_t *pmdp)
207{ 203{
@@ -214,8 +210,8 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
214 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 210 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
215 VM_BUG_ON(pmd_trans_huge(*pmdp)); 211 VM_BUG_ON(pmd_trans_huge(*pmdp));
216 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 212 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
217 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 213 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
218 return pmd; 214 return pmd;
219} 215}
220#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
221#endif 216#endif
217#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
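The pgtable-generic.c hunks hinge on the weak-default pattern shown above: an arch that defines __HAVE_ARCH_FLUSH_PMD_TLB_RANGE (or the macro itself) supplies its own flush_pmd_tlb_range(); everyone else falls back to the generic flush_tlb_range(). A standalone illustration of that pattern (names mimic the kernel ones but this is not kernel code):

#include <stdio.h>

static void flush_tlb_range(unsigned long start, unsigned long end)
{
	printf("generic range flush %#lx-%#lx\n", start, end);
}

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#define flush_pmd_tlb_range(start, end) flush_tlb_range(start, end)
#endif

int main(void)
{
	flush_pmd_tlb_range(0x200000UL, 0x400000UL);	/* one 2MB THP span */
	return 0;
}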
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2faaa2976447..af3a519e40c2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2688,52 +2688,5 @@ static int __init proc_vmalloc_init(void)
2688} 2688}
2689module_init(proc_vmalloc_init); 2689module_init(proc_vmalloc_init);
2690 2690
2691void get_vmalloc_info(struct vmalloc_info *vmi)
2692{
2693 struct vmap_area *va;
2694 unsigned long free_area_size;
2695 unsigned long prev_end;
2696
2697 vmi->used = 0;
2698 vmi->largest_chunk = 0;
2699
2700 prev_end = VMALLOC_START;
2701
2702 rcu_read_lock();
2703
2704 if (list_empty(&vmap_area_list)) {
2705 vmi->largest_chunk = VMALLOC_TOTAL;
2706 goto out;
2707 }
2708
2709 list_for_each_entry_rcu(va, &vmap_area_list, list) {
2710 unsigned long addr = va->va_start;
2711
2712 /*
2713 * Some archs keep another range for modules in vmalloc space
2714 */
2715 if (addr < VMALLOC_START)
2716 continue;
2717 if (addr >= VMALLOC_END)
2718 break;
2719
2720 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2721 continue;
2722
2723 vmi->used += (va->va_end - va->va_start);
2724
2725 free_area_size = addr - prev_end;
2726 if (vmi->largest_chunk < free_area_size)
2727 vmi->largest_chunk = free_area_size;
2728
2729 prev_end = va->va_end;
2730 }
2731
2732 if (VMALLOC_END - prev_end > vmi->largest_chunk)
2733 vmi->largest_chunk = VMALLOC_END - prev_end;
2734
2735out:
2736 rcu_read_unlock();
2737}
2738#endif 2691#endif
2739 2692
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bb6470f5b7b..c14748d051e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -99,6 +99,7 @@
99#include <linux/rtnetlink.h> 99#include <linux/rtnetlink.h>
100#include <linux/stat.h> 100#include <linux/stat.h>
101#include <net/dst.h> 101#include <net/dst.h>
102#include <net/dst_metadata.h>
102#include <net/pkt_sched.h> 103#include <net/pkt_sched.h>
103#include <net/checksum.h> 104#include <net/checksum.h>
104#include <net/xfrm.h> 105#include <net/xfrm.h>
@@ -682,6 +683,32 @@ int dev_get_iflink(const struct net_device *dev)
682EXPORT_SYMBOL(dev_get_iflink); 683EXPORT_SYMBOL(dev_get_iflink);
683 684
684/** 685/**
686 * dev_fill_metadata_dst - Retrieve tunnel egress information.
687 * @dev: targeted interface
688 * @skb: The packet.
689 *
 690 * For better visibility of tunnel traffic, OVS needs to retrieve
 691 * egress tunnel information for a packet. This API lets the
 692 * caller obtain that information.
693 */
694int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
695{
696 struct ip_tunnel_info *info;
697
698 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
699 return -EINVAL;
700
701 info = skb_tunnel_info_unclone(skb);
702 if (!info)
703 return -ENOMEM;
704 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
705 return -EINVAL;
706
707 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
708}
709EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
710
711/**
685 * __dev_get_by_name - find a device by its name 712 * __dev_get_by_name - find a device by its name
686 * @net: the applicable net namespace 713 * @net: the applicable net namespace
687 * @name: name to find 714 * @name: name to find
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 6c2af797f2f9..744e5936c10d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1569,7 +1569,7 @@ static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
1569 do { 1569 do {
1570 /* record parent and next child index */ 1570 /* record parent and next child index */
1571 pn = n; 1571 pn = n;
1572 cindex = key ? get_index(key, pn) : 0; 1572 cindex = (key > pn->key) ? get_index(key, pn) : 0;
1573 1573
1574 if (cindex >> pn->bits) 1574 if (cindex >> pn->bits)
1575 break; 1575 break;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 5aa46d4b44ef..5a8ee3282550 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -36,7 +36,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
36 SKB_GSO_TCP_ECN | 36 SKB_GSO_TCP_ECN |
37 SKB_GSO_GRE | 37 SKB_GSO_GRE |
38 SKB_GSO_GRE_CSUM | 38 SKB_GSO_GRE_CSUM |
39 SKB_GSO_IPIP))) 39 SKB_GSO_IPIP |
40 SKB_GSO_SIT)))
40 goto out; 41 goto out;
41 42
42 if (!skb->encapsulation) 43 if (!skb->encapsulation)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index bd0679d90519..614521437e30 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -498,10 +498,26 @@ static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
498 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); 498 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
499} 499}
500 500
501static struct rtable *gre_get_rt(struct sk_buff *skb,
502 struct net_device *dev,
503 struct flowi4 *fl,
504 const struct ip_tunnel_key *key)
505{
506 struct net *net = dev_net(dev);
507
508 memset(fl, 0, sizeof(*fl));
509 fl->daddr = key->u.ipv4.dst;
510 fl->saddr = key->u.ipv4.src;
511 fl->flowi4_tos = RT_TOS(key->tos);
512 fl->flowi4_mark = skb->mark;
513 fl->flowi4_proto = IPPROTO_GRE;
514
515 return ip_route_output_key(net, fl);
516}
517
501static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) 518static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
502{ 519{
503 struct ip_tunnel_info *tun_info; 520 struct ip_tunnel_info *tun_info;
504 struct net *net = dev_net(dev);
505 const struct ip_tunnel_key *key; 521 const struct ip_tunnel_key *key;
506 struct flowi4 fl; 522 struct flowi4 fl;
507 struct rtable *rt; 523 struct rtable *rt;
@@ -516,14 +532,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
516 goto err_free_skb; 532 goto err_free_skb;
517 533
518 key = &tun_info->key; 534 key = &tun_info->key;
519 memset(&fl, 0, sizeof(fl)); 535 rt = gre_get_rt(skb, dev, &fl, key);
520 fl.daddr = key->u.ipv4.dst;
521 fl.saddr = key->u.ipv4.src;
522 fl.flowi4_tos = RT_TOS(key->tos);
523 fl.flowi4_mark = skb->mark;
524 fl.flowi4_proto = IPPROTO_GRE;
525
526 rt = ip_route_output_key(net, &fl);
527 if (IS_ERR(rt)) 536 if (IS_ERR(rt))
528 goto err_free_skb; 537 goto err_free_skb;
529 538
@@ -566,6 +575,24 @@ err_free_skb:
566 dev->stats.tx_dropped++; 575 dev->stats.tx_dropped++;
567} 576}
568 577
578static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
579{
580 struct ip_tunnel_info *info = skb_tunnel_info(skb);
581 struct rtable *rt;
582 struct flowi4 fl4;
583
584 if (ip_tunnel_info_af(info) != AF_INET)
585 return -EINVAL;
586
587 rt = gre_get_rt(skb, dev, &fl4, &info->key);
588 if (IS_ERR(rt))
589 return PTR_ERR(rt);
590
591 ip_rt_put(rt);
592 info->key.u.ipv4.src = fl4.saddr;
593 return 0;
594}
595
569static netdev_tx_t ipgre_xmit(struct sk_buff *skb, 596static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
570 struct net_device *dev) 597 struct net_device *dev)
571{ 598{
@@ -1023,6 +1050,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
1023 .ndo_change_mtu = ip_tunnel_change_mtu, 1050 .ndo_change_mtu = ip_tunnel_change_mtu,
1024 .ndo_get_stats64 = ip_tunnel_get_stats64, 1051 .ndo_get_stats64 = ip_tunnel_get_stats64,
1025 .ndo_get_iflink = ip_tunnel_get_iflink, 1052 .ndo_get_iflink = ip_tunnel_get_iflink,
1053 .ndo_fill_metadata_dst = gre_fill_metadata_dst,
1026}; 1054};
1027 1055
1028static void ipgre_tap_setup(struct net_device *dev) 1056static void ipgre_tap_setup(struct net_device *dev)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 690d27d3f2f9..a35584176535 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -75,6 +75,7 @@ endif # NF_TABLES
75 75
76config NF_DUP_IPV4 76config NF_DUP_IPV4
77 tristate "Netfilter IPv4 packet duplication to alternate destination" 77 tristate "Netfilter IPv4 packet duplication to alternate destination"
78 depends on !NF_CONNTRACK || NF_CONNTRACK
78 help 79 help
79 This option enables the nf_dup_ipv4 core, which duplicates an IPv4 80 This option enables the nf_dup_ipv4 core, which duplicates an IPv4
80 packet to be rerouted to another destination. 81 packet to be rerouted to another destination.
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 8618fd150c96..c4ffc9de1654 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -61,9 +61,7 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
61 if (FIB_RES_DEV(res) == dev) 61 if (FIB_RES_DEV(res) == dev)
62 dev_match = true; 62 dev_match = true;
63#endif 63#endif
64 if (dev_match || flags & XT_RPFILTER_LOOSE) 64 return dev_match || flags & XT_RPFILTER_LOOSE;
65 return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
66 return dev_match;
67} 65}
68 66
69static bool rpfilter_is_local(const struct sk_buff *skb) 67static bool rpfilter_is_local(const struct sk_buff *skb)
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 7092a61c4dc8..7e538f71f5fb 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -209,7 +209,7 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
209 209
210 /* alpha = (1 - g) * alpha + g * F */ 210 /* alpha = (1 - g) * alpha + g * F */
211 211
212 alpha -= alpha >> dctcp_shift_g; 212 alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
213 if (bytes_ecn) { 213 if (bytes_ecn) {
214 /* If dctcp_shift_g == 1, a 32bit value would overflow 214 /* If dctcp_shift_g == 1, a 32bit value would overflow
215 * after 8 Mbytes. 215 * after 8 Mbytes.
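The tcp_dctcp.c change keeps the EWMA "alpha = (1 - g) * alpha + g * F" decaying all the way to zero: once alpha drops below 2^dctcp_shift_g, the old "alpha -= alpha >> shift" subtracts nothing and alpha gets stuck. A userspace illustration of the fixed update with F = 0 (no ECN marks), using a simplified min_not_zero() stand-in:

#include <stdio.h>

static unsigned int min_not_zero(unsigned int x, unsigned int y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	unsigned int shift_g = 4;	/* dctcp_shift_g, i.e. g = 1/16 */
	unsigned int alpha = 1024;	/* DCTCP_MAX_ALPHA */

	/* decrement by at least 1 once (alpha >> shift_g) truncates to 0 */
	for (int rtt = 0; alpha && rtt < 200; rtt++)
		alpha -= min_not_zero(alpha, alpha >> shift_g);

	printf("alpha decayed to %u\n", alpha);	/* reaches 0 with the fix */
	return 0;
}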
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1100ffe4a722..3dbee0d83b15 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3405,7 +3405,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
3405 */ 3405 */
3406 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); 3406 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3407 skb_mstamp_get(&skb->skb_mstamp); 3407 skb_mstamp_get(&skb->skb_mstamp);
3408 NET_INC_STATS_BH(sock_net(sk), mib); 3408 NET_INC_STATS(sock_net(sk), mib);
3409 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 3409 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
3410} 3410}
3411 3411
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 2878dbfffeb7..41a261355662 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -30,6 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
30 30
31 mtu = dst_mtu(skb_dst(skb)); 31 mtu = dst_mtu(skb_dst(skb));
32 if (skb->len > mtu) { 32 if (skb->len > mtu) {
33 skb->protocol = htons(ETH_P_IP);
34
33 if (skb->sk) 35 if (skb->sk)
34 xfrm_local_error(skb, mtu); 36 xfrm_local_error(skb, mtu);
35 else 37 else
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 9f777ec59a59..ed33abf57abd 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,6 +32,7 @@ struct fib6_rule {
32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, 32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
33 int flags, pol_lookup_t lookup) 33 int flags, pol_lookup_t lookup)
34{ 34{
35 struct rt6_info *rt;
35 struct fib_lookup_arg arg = { 36 struct fib_lookup_arg arg = {
36 .lookup_ptr = lookup, 37 .lookup_ptr = lookup,
37 .flags = FIB_LOOKUP_NOREF, 38 .flags = FIB_LOOKUP_NOREF,
@@ -40,11 +41,21 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
40 fib_rules_lookup(net->ipv6.fib6_rules_ops, 41 fib_rules_lookup(net->ipv6.fib6_rules_ops,
41 flowi6_to_flowi(fl6), flags, &arg); 42 flowi6_to_flowi(fl6), flags, &arg);
42 43
43 if (arg.result) 44 rt = arg.result;
44 return arg.result;
45 45
46 dst_hold(&net->ipv6.ip6_null_entry->dst); 46 if (!rt) {
47 return &net->ipv6.ip6_null_entry->dst; 47 dst_hold(&net->ipv6.ip6_null_entry->dst);
48 return &net->ipv6.ip6_null_entry->dst;
49 }
50
51 if (rt->rt6i_flags & RTF_REJECT &&
52 rt->dst.error == -EAGAIN) {
53 ip6_rt_put(rt);
54 rt = net->ipv6.ip6_null_entry;
55 dst_hold(&rt->dst);
56 }
57
58 return &rt->dst;
48} 59}
49 60
50static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, 61static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7d2e0023c72d..6cedc62b2abb 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -285,7 +285,17 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
285struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, 285struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
286 int flags, pol_lookup_t lookup) 286 int flags, pol_lookup_t lookup)
287{ 287{
288 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); 288 struct rt6_info *rt;
289
290 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
291 if (rt->rt6i_flags & RTF_REJECT &&
292 rt->dst.error == -EAGAIN) {
293 ip6_rt_put(rt);
294 rt = net->ipv6.ip6_null_entry;
295 dst_hold(&rt->dst);
296 }
297
298 return &rt->dst;
289} 299}
290 300
291static void __net_init fib6_tables_init(struct net *net) 301static void __net_init fib6_tables_init(struct net *net)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 61d403ee1031..f84ec4e9b2de 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -584,6 +584,8 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
584 if (np->frag_size) 584 if (np->frag_size)
585 mtu = np->frag_size; 585 mtu = np->frag_size;
586 } 586 }
587 if (mtu < hlen + sizeof(struct frag_hdr) + 8)
588 goto fail_toobig;
587 mtu -= hlen + sizeof(struct frag_hdr); 589 mtu -= hlen + sizeof(struct frag_hdr);
588 590
589 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, 591 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
@@ -877,7 +879,8 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
877#ifdef CONFIG_IPV6_SUBTREES 879#ifdef CONFIG_IPV6_SUBTREES
878 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || 880 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
879#endif 881#endif
880 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) { 882 (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
883 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
881 dst_release(dst); 884 dst_release(dst);
882 dst = NULL; 885 dst = NULL;
883 } 886 }
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 96833e4b3193..f6a024e141e5 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -58,6 +58,7 @@ endif # NF_TABLES
58 58
59config NF_DUP_IPV6 59config NF_DUP_IPV6
60 tristate "Netfilter IPv6 packet duplication to alternate destination" 60 tristate "Netfilter IPv6 packet duplication to alternate destination"
61 depends on !NF_CONNTRACK || NF_CONNTRACK
61 help 62 help
62 This option enables the nf_dup_ipv6 core, which duplicates an IPv6 63 This option enables the nf_dup_ipv6 core, which duplicates an IPv6
63 packet to be rerouted to another destination. 64 packet to be rerouted to another destination.
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 701cd2bae0a9..c7196ad1d69f 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -646,6 +646,7 @@ void nf_ct_frag6_consume_orig(struct sk_buff *skb)
646 s = s2; 646 s = s2;
647 } 647 }
648} 648}
649EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
649 650
650static int nf_ct_net_init(struct net *net) 651static int nf_ct_net_init(struct net *net)
651{ 652{
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 968f31c01f89..946880ad48ac 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1068,6 +1068,9 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1068 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 1068 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1069 saved_fn = fn; 1069 saved_fn = fn;
1070 1070
1071 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1072 oif = 0;
1073
1071redo_rt6_select: 1074redo_rt6_select:
1072 rt = rt6_select(fn, oif, strict); 1075 rt = rt6_select(fn, oif, strict);
1073 if (rt->rt6i_nsiblings) 1076 if (rt->rt6i_nsiblings)
@@ -1190,14 +1193,16 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
1190 struct flowi6 *fl6) 1193 struct flowi6 *fl6)
1191{ 1194{
1192 int flags = 0; 1195 int flags = 0;
1196 bool any_src;
1193 1197
1194 fl6->flowi6_iif = LOOPBACK_IFINDEX; 1198 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1195 1199
1200 any_src = ipv6_addr_any(&fl6->saddr);
1196 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || 1201 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1197 fl6->flowi6_oif) 1202 (fl6->flowi6_oif && any_src))
1198 flags |= RT6_LOOKUP_F_IFACE; 1203 flags |= RT6_LOOKUP_F_IFACE;
1199 1204
1200 if (!ipv6_addr_any(&fl6->saddr)) 1205 if (!any_src)
1201 flags |= RT6_LOOKUP_F_HAS_SADDR; 1206 flags |= RT6_LOOKUP_F_HAS_SADDR;
1202 else if (sk) 1207 else if (sk)
1203 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs); 1208 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 09c76a7b474d..e15feb7b413d 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -79,6 +79,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
79 79
80 if (!skb->ignore_df && skb->len > mtu) { 80 if (!skb->ignore_df && skb->len > mtu) {
81 skb->dev = dst->dev; 81 skb->dev = dst->dev;
82 skb->protocol = htons(ETH_P_IPV6);
82 83
83 if (xfrm6_local_dontfrag(skb)) 84 if (xfrm6_local_dontfrag(skb))
84 xfrm6_local_rxpmtu(skb, mtu); 85 xfrm6_local_rxpmtu(skb, mtu);
@@ -136,6 +137,7 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
136 struct dst_entry *dst = skb_dst(skb); 137 struct dst_entry *dst = skb_dst(skb);
137 struct xfrm_state *x = dst->xfrm; 138 struct xfrm_state *x = dst->xfrm;
138 int mtu; 139 int mtu;
140 bool toobig;
139 141
140#ifdef CONFIG_NETFILTER 142#ifdef CONFIG_NETFILTER
141 if (!x) { 143 if (!x) {
@@ -144,25 +146,29 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
144 } 146 }
145#endif 147#endif
146 148
149 if (x->props.mode != XFRM_MODE_TUNNEL)
150 goto skip_frag;
151
147 if (skb->protocol == htons(ETH_P_IPV6)) 152 if (skb->protocol == htons(ETH_P_IPV6))
148 mtu = ip6_skb_dst_mtu(skb); 153 mtu = ip6_skb_dst_mtu(skb);
149 else 154 else
150 mtu = dst_mtu(skb_dst(skb)); 155 mtu = dst_mtu(skb_dst(skb));
151 156
152 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { 157 toobig = skb->len > mtu && !skb_is_gso(skb);
158
159 if (toobig && xfrm6_local_dontfrag(skb)) {
153 xfrm6_local_rxpmtu(skb, mtu); 160 xfrm6_local_rxpmtu(skb, mtu);
154 return -EMSGSIZE; 161 return -EMSGSIZE;
155 } else if (!skb->ignore_df && skb->len > mtu && skb->sk) { 162 } else if (!skb->ignore_df && toobig && skb->sk) {
156 xfrm_local_error(skb, mtu); 163 xfrm_local_error(skb, mtu);
157 return -EMSGSIZE; 164 return -EMSGSIZE;
158 } 165 }
159 166
160 if (x->props.mode == XFRM_MODE_TUNNEL && 167 if (toobig || dst_allfrag(skb_dst(skb)))
161 ((skb->len > mtu && !skb_is_gso(skb)) ||
162 dst_allfrag(skb_dst(skb)))) {
163 return ip6_fragment(sk, skb, 168 return ip6_fragment(sk, skb,
164 x->outer_mode->afinfo->output_finish); 169 x->outer_mode->afinfo->output_finish);
165 } 170
171skip_frag:
166 return x->outer_mode->afinfo->output_finish(sk, skb); 172 return x->outer_mode->afinfo->output_finish(sk, skb);
167} 173}
168 174
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 5cedfda4b241..da55e0c85bb8 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -179,7 +179,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
179 return; 179 return;
180 180
181 case IPPROTO_ICMPV6: 181 case IPPROTO_ICMPV6:
182 if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { 182 if (!onlyproto && (nh + offset + 2 < skb->data ||
183 pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
183 u8 *icmp; 184 u8 *icmp;
184 185
185 nh = skb_network_header(skb); 186 nh = skb_network_header(skb);
@@ -193,7 +194,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
193#if IS_ENABLED(CONFIG_IPV6_MIP6) 194#if IS_ENABLED(CONFIG_IPV6_MIP6)
194 case IPPROTO_MH: 195 case IPPROTO_MH:
195 offset += ipv6_optlen(exthdr); 196 offset += ipv6_optlen(exthdr);
196 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { 197 if (!onlyproto && (nh + offset + 3 < skb->data ||
198 pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
197 struct ip6_mh *mh; 199 struct ip6_mh *mh;
198 200
199 nh = skb_network_header(skb); 201 nh = skb_network_header(skb);
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index a26c401ef4a4..43964594aa12 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1839,7 +1839,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
1839 for (element = hashbin_get_first(iter->hashbin); 1839 for (element = hashbin_get_first(iter->hashbin);
1840 element != NULL; 1840 element != NULL;
1841 element = hashbin_get_next(iter->hashbin)) { 1841 element = hashbin_get_next(iter->hashbin)) {
1842 if (!off || *off-- == 0) { 1842 if (!off || (*off)-- == 0) {
1843 /* NB: hashbin left locked */ 1843 /* NB: hashbin left locked */
1844 return element; 1844 return element;
1845 } 1845 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 83a70688784b..f9c9ecb0cdd3 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -261,7 +261,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
261 261
262 err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); 262 err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
263 263
264 /* Error is cleare after succecful sending to at least one 264 /* Error is cleared after successful sending to at least one
265 * registered KM */ 265 * registered KM */
266 if ((broadcast_flags & BROADCAST_REGISTERED) && err) 266 if ((broadcast_flags & BROADCAST_REGISTERED) && err)
267 err = err2; 267 err = err2;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 8e47f8113495..21a085686dc1 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -152,6 +152,8 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
152#endif 152#endif
153 synchronize_net(); 153 synchronize_net();
154 nf_queue_nf_hook_drop(net, &entry->ops); 154 nf_queue_nf_hook_drop(net, &entry->ops);
155 /* other cpu might still process nfqueue verdict that used reg */
156 synchronize_net();
155 kfree(entry); 157 kfree(entry);
156} 158}
157EXPORT_SYMBOL(nf_unregister_net_hook); 159EXPORT_SYMBOL(nf_unregister_net_hook);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a1fe5377a2b3..5a30ce6e8c90 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -297,7 +297,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
297 ip_set_timeout_expired(ext_timeout(n, set)))) 297 ip_set_timeout_expired(ext_timeout(n, set))))
298 n = NULL; 298 n = NULL;
299 299
300 e = kzalloc(set->dsize, GFP_KERNEL); 300 e = kzalloc(set->dsize, GFP_ATOMIC);
301 if (!e) 301 if (!e)
302 return -ENOMEM; 302 return -ENOMEM;
303 e->id = d->id; 303 e->id = d->id;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0a49a8c7c564..fafe33bdb619 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2371,7 +2371,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
2371 int pos, idx, shift; 2371 int pos, idx, shift;
2372 2372
2373 err = 0; 2373 err = 0;
2374 netlink_table_grab(); 2374 netlink_lock_table();
2375 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) { 2375 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
2376 if (len - pos < sizeof(u32)) 2376 if (len - pos < sizeof(u32))
2377 break; 2377 break;
@@ -2386,7 +2386,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
2386 } 2386 }
2387 if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen)) 2387 if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
2388 err = -EFAULT; 2388 err = -EFAULT;
2389 netlink_table_ungrab(); 2389 netlink_unlock_table();
2390 break; 2390 break;
2391 } 2391 }
2392 case NETLINK_CAP_ACK: 2392 case NETLINK_CAP_ACK:
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c6a39bf2c3b9..dba635d086b2 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -768,7 +768,6 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
768 struct sw_flow_key *key, const struct nlattr *attr, 768 struct sw_flow_key *key, const struct nlattr *attr,
769 const struct nlattr *actions, int actions_len) 769 const struct nlattr *actions, int actions_len)
770{ 770{
771 struct ip_tunnel_info info;
772 struct dp_upcall_info upcall; 771 struct dp_upcall_info upcall;
773 const struct nlattr *a; 772 const struct nlattr *a;
774 int rem; 773 int rem;
@@ -796,11 +795,9 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
796 if (vport) { 795 if (vport) {
797 int err; 796 int err;
798 797
799 upcall.egress_tun_info = &info; 798 err = dev_fill_metadata_dst(vport->dev, skb);
800 err = ovs_vport_get_egress_tun_info(vport, skb, 799 if (!err)
801 &upcall); 800 upcall.egress_tun_info = skb_tunnel_info(skb);
802 if (err)
803 upcall.egress_tun_info = NULL;
804 } 801 }
805 802
806 break; 803 break;
@@ -1112,8 +1109,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
1112 nla_data(a)); 1109 nla_data(a));
1113 1110
1114 /* Hide stolen IP fragments from user space. */ 1111 /* Hide stolen IP fragments from user space. */
1115 if (err == -EINPROGRESS) 1112 if (err)
1116 return 0; 1113 return err == -EINPROGRESS ? 0 : err;
1117 break; 1114 break;
1118 } 1115 }
1119 1116
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 80bf702715bb..50095820edb7 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -151,6 +151,8 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
151 ct = nf_ct_get(skb, &ctinfo); 151 ct = nf_ct_get(skb, &ctinfo);
152 if (ct) { 152 if (ct) {
153 state = ovs_ct_get_state(ctinfo); 153 state = ovs_ct_get_state(ctinfo);
154 if (!nf_ct_is_confirmed(ct))
155 state |= OVS_CS_F_NEW;
154 if (ct->master) 156 if (ct->master)
155 state |= OVS_CS_F_RELATED; 157 state |= OVS_CS_F_RELATED;
156 zone = nf_ct_zone(ct); 158 zone = nf_ct_zone(ct);
@@ -222,9 +224,6 @@ static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
222 struct nf_conn *ct; 224 struct nf_conn *ct;
223 int err; 225 int err;
224 226
225 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
226 return -ENOTSUPP;
227
228 /* The connection could be invalid, in which case set_label is no-op.*/ 227 /* The connection could be invalid, in which case set_label is no-op.*/
229 ct = nf_ct_get(skb, &ctinfo); 228 ct = nf_ct_get(skb, &ctinfo);
230 if (!ct) 229 if (!ct)
@@ -294,6 +293,9 @@ static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
294 return helper->help(skb, protoff, ct, ctinfo); 293 return helper->help(skb, protoff, ct, ctinfo);
295} 294}
296 295
296/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
297 * value if 'skb' is freed.
298 */
297static int handle_fragments(struct net *net, struct sw_flow_key *key, 299static int handle_fragments(struct net *net, struct sw_flow_key *key,
298 u16 zone, struct sk_buff *skb) 300 u16 zone, struct sk_buff *skb)
299{ 301{
@@ -309,8 +311,8 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
309 return err; 311 return err;
310 312
311 ovs_cb.mru = IPCB(skb)->frag_max_size; 313 ovs_cb.mru = IPCB(skb)->frag_max_size;
312 } else if (key->eth.type == htons(ETH_P_IPV6)) {
313#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 314#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
315 } else if (key->eth.type == htons(ETH_P_IPV6)) {
314 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 316 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
315 struct sk_buff *reasm; 317 struct sk_buff *reasm;
316 318
@@ -319,17 +321,25 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
319 if (!reasm) 321 if (!reasm)
320 return -EINPROGRESS; 322 return -EINPROGRESS;
321 323
322 if (skb == reasm) 324 if (skb == reasm) {
325 kfree_skb(skb);
323 return -EINVAL; 326 return -EINVAL;
327 }
328
329 /* Don't free 'skb' even though it is one of the original
330 * fragments, as we're going to morph it into the head.
331 */
332 skb_get(skb);
333 nf_ct_frag6_consume_orig(reasm);
324 334
325 key->ip.proto = ipv6_hdr(reasm)->nexthdr; 335 key->ip.proto = ipv6_hdr(reasm)->nexthdr;
326 skb_morph(skb, reasm); 336 skb_morph(skb, reasm);
337 skb->next = reasm->next;
327 consume_skb(reasm); 338 consume_skb(reasm);
328 ovs_cb.mru = IP6CB(skb)->frag_max_size; 339 ovs_cb.mru = IP6CB(skb)->frag_max_size;
329#else
330 return -EPFNOSUPPORT;
331#endif 340#endif
332 } else { 341 } else {
342 kfree_skb(skb);
333 return -EPFNOSUPPORT; 343 return -EPFNOSUPPORT;
334 } 344 }
335 345
@@ -377,7 +387,7 @@ static bool skb_nfct_cached(const struct net *net, const struct sk_buff *skb,
377 return true; 387 return true;
378} 388}
379 389
380static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key, 390static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
381 const struct ovs_conntrack_info *info, 391 const struct ovs_conntrack_info *info,
382 struct sk_buff *skb) 392 struct sk_buff *skb)
383{ 393{
@@ -408,6 +418,8 @@ static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
408 } 418 }
409 } 419 }
410 420
421 ovs_ct_update_key(skb, key, true);
422
411 return 0; 423 return 0;
412} 424}
413 425
@@ -430,8 +442,6 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
430 err = __ovs_ct_lookup(net, key, info, skb); 442 err = __ovs_ct_lookup(net, key, info, skb);
431 if (err) 443 if (err)
432 return err; 444 return err;
433
434 ovs_ct_update_key(skb, key, true);
435 } 445 }
436 446
437 return 0; 447 return 0;
@@ -460,8 +470,6 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
460 if (nf_conntrack_confirm(skb) != NF_ACCEPT) 470 if (nf_conntrack_confirm(skb) != NF_ACCEPT)
461 return -EINVAL; 471 return -EINVAL;
462 472
463 ovs_ct_update_key(skb, key, true);
464
465 return 0; 473 return 0;
466} 474}
467 475
@@ -476,6 +484,9 @@ static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
476 return false; 484 return false;
477} 485}
478 486
487/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
488 * value if 'skb' is freed.
489 */
479int ovs_ct_execute(struct net *net, struct sk_buff *skb, 490int ovs_ct_execute(struct net *net, struct sk_buff *skb,
480 struct sw_flow_key *key, 491 struct sw_flow_key *key,
481 const struct ovs_conntrack_info *info) 492 const struct ovs_conntrack_info *info)
@@ -511,6 +522,8 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
511 &info->labels.mask); 522 &info->labels.mask);
512err: 523err:
513 skb_push(skb, nh_ofs); 524 skb_push(skb, nh_ofs);
525 if (err)
526 kfree_skb(skb);
514 return err; 527 return err;
515} 528}
516 529
@@ -587,6 +600,10 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
587 case OVS_CT_ATTR_MARK: { 600 case OVS_CT_ATTR_MARK: {
588 struct md_mark *mark = nla_data(a); 601 struct md_mark *mark = nla_data(a);
589 602
603 if (!mark->mask) {
604 OVS_NLERR(log, "ct_mark mask cannot be 0");
605 return -EINVAL;
606 }
590 info->mark = *mark; 607 info->mark = *mark;
591 break; 608 break;
592 } 609 }
@@ -595,6 +612,10 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
595 case OVS_CT_ATTR_LABELS: { 612 case OVS_CT_ATTR_LABELS: {
596 struct md_labels *labels = nla_data(a); 613 struct md_labels *labels = nla_data(a);
597 614
615 if (!labels_nonzero(&labels->mask)) {
616 OVS_NLERR(log, "ct_labels mask cannot be 0");
617 return -EINVAL;
618 }
598 info->labels = *labels; 619 info->labels = *labels;
599 break; 620 break;
600 } 621 }
@@ -705,11 +726,12 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
705 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 726 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
706 nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id)) 727 nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
707 return -EMSGSIZE; 728 return -EMSGSIZE;
708 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 729 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
709 nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark), 730 nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
710 &ct_info->mark)) 731 &ct_info->mark))
711 return -EMSGSIZE; 732 return -EMSGSIZE;
712 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 733 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
734 labels_nonzero(&ct_info->labels.mask) &&
713 nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels), 735 nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
714 &ct_info->labels)) 736 &ct_info->labels))
715 return -EMSGSIZE; 737 return -EMSGSIZE;
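The conntrack hunks above document the return convention "0 on success, -EINPROGRESS if 'skb' is stolen, other nonzero if 'skb' is freed", and the matching caller change in actions.c hides the stolen case. A small standalone analogue of that caller-side handling (error values chosen for illustration):

#include <stdio.h>

#define EINPROGRESS 115
#define EINVAL 22

/* -EINPROGRESS means the skb was stolen (e.g. queued for reassembly) and
 * must be treated as success; any other error means it was already freed.
 */
static int consume(int err)
{
	if (err)
		return err == -EINPROGRESS ? 0 : err;
	return 0;
}

int main(void)
{
	printf("ok:     %d\n", consume(0));
	printf("stolen: %d\n", consume(-EINPROGRESS));	/* hidden from caller */
	printf("freed:  %d\n", consume(-EINVAL));	/* propagated */
	return 0;
}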
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
index da8714942c95..a7544f405c16 100644
--- a/net/openvswitch/conntrack.h
+++ b/net/openvswitch/conntrack.h
@@ -35,12 +35,9 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
35int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb); 35int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
36void ovs_ct_free_action(const struct nlattr *a); 36void ovs_ct_free_action(const struct nlattr *a);
37 37
38static inline bool ovs_ct_state_supported(u32 state) 38#define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
39{ 39 OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | \
40 return !(state & ~(OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | 40 OVS_CS_F_INVALID | OVS_CS_F_TRACKED)
41 OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR |
42 OVS_CS_F_INVALID | OVS_CS_F_TRACKED));
43}
44#else 41#else
45#include <linux/errno.h> 42#include <linux/errno.h>
46 43
@@ -53,11 +50,6 @@ static inline bool ovs_ct_verify(struct net *net, int attr)
53 return false; 50 return false;
54} 51}
55 52
56static inline bool ovs_ct_state_supported(u32 state)
57{
58 return false;
59}
60
61static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla, 53static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla,
62 const struct sw_flow_key *key, 54 const struct sw_flow_key *key,
63 struct sw_flow_actions **acts, bool log) 55 struct sw_flow_actions **acts, bool log)
@@ -75,6 +67,7 @@ static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb,
75 struct sw_flow_key *key, 67 struct sw_flow_key *key,
76 const struct ovs_conntrack_info *info) 68 const struct ovs_conntrack_info *info)
77{ 69{
70 kfree_skb(skb);
78 return -ENOTSUPP; 71 return -ENOTSUPP;
79} 72}
80 73
@@ -94,5 +87,7 @@ static inline int ovs_ct_put_key(const struct sw_flow_key *key,
94} 87}
95 88
96static inline void ovs_ct_free_action(const struct nlattr *a) { } 89static inline void ovs_ct_free_action(const struct nlattr *a) { }
90
91#define CT_SUPPORTED_MASK 0
97#endif /* CONFIG_NF_CONNTRACK */ 92#endif /* CONFIG_NF_CONNTRACK */
98#endif /* ovs_conntrack.h */ 93#endif /* ovs_conntrack.h */
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index b816ff871528..c5d08ee37730 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -490,9 +490,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
490 490
491 if (upcall_info->egress_tun_info) { 491 if (upcall_info->egress_tun_info) {
492 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY); 492 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
493 err = ovs_nla_put_egress_tunnel_key(user_skb, 493 err = ovs_nla_put_tunnel_info(user_skb,
494 upcall_info->egress_tun_info, 494 upcall_info->egress_tun_info);
495 upcall_info->egress_tun_opts);
496 BUG_ON(err); 495 BUG_ON(err);
497 nla_nest_end(user_skb, nla); 496 nla_nest_end(user_skb, nla);
498 } 497 }
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index f88038a99f44..67bdecd9fdc1 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -117,7 +117,6 @@ struct ovs_skb_cb {
117 */ 117 */
118struct dp_upcall_info { 118struct dp_upcall_info {
119 struct ip_tunnel_info *egress_tun_info; 119 struct ip_tunnel_info *egress_tun_info;
120 const void *egress_tun_opts;
121 const struct nlattr *userdata; 120 const struct nlattr *userdata;
122 const struct nlattr *actions; 121 const struct nlattr *actions;
123 int actions_len; 122 int actions_len;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 171a691f1c32..38536c137c54 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -717,7 +717,7 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
717 if ((output->tun_flags & TUNNEL_OAM) && 717 if ((output->tun_flags & TUNNEL_OAM) &&
718 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM)) 718 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
719 return -EMSGSIZE; 719 return -EMSGSIZE;
720 if (tun_opts) { 720 if (swkey_tun_opts_len) {
721 if (output->tun_flags & TUNNEL_GENEVE_OPT && 721 if (output->tun_flags & TUNNEL_GENEVE_OPT &&
722 nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, 722 nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
723 swkey_tun_opts_len, tun_opts)) 723 swkey_tun_opts_len, tun_opts))
@@ -749,13 +749,12 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb,
749 return 0; 749 return 0;
750} 750}
751 751
752int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb, 752int ovs_nla_put_tunnel_info(struct sk_buff *skb,
753 const struct ip_tunnel_info *egress_tun_info, 753 struct ip_tunnel_info *tun_info)
754 const void *egress_tun_opts)
755{ 754{
756 return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key, 755 return __ipv4_tun_to_nlattr(skb, &tun_info->key,
757 egress_tun_opts, 756 ip_tunnel_info_opts(tun_info),
758 egress_tun_info->options_len); 757 tun_info->options_len);
759} 758}
760 759
761static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, 760static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
@@ -816,7 +815,7 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
816 ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) { 815 ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
817 u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]); 816 u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
818 817
819 if (!is_mask && !ovs_ct_state_supported(ct_state)) { 818 if (ct_state & ~CT_SUPPORTED_MASK) {
820 OVS_NLERR(log, "ct_state flags %08x unsupported", 819 OVS_NLERR(log, "ct_state flags %08x unsupported",
821 ct_state); 820 ct_state);
822 return -EINVAL; 821 return -EINVAL;
@@ -1099,6 +1098,9 @@ static void nlattr_set(struct nlattr *attr, u8 val,
1099 } else { 1098 } else {
1100 memset(nla_data(nla), val, nla_len(nla)); 1099 memset(nla_data(nla), val, nla_len(nla));
1101 } 1100 }
1101
1102 if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
1103 *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
1102 } 1104 }
1103} 1105}
1104 1106
@@ -2380,10 +2382,7 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
2380 if (!start) 2382 if (!start)
2381 return -EMSGSIZE; 2383 return -EMSGSIZE;
2382 2384
2383 err = ipv4_tun_to_nlattr(skb, &tun_info->key, 2385 err = ovs_nla_put_tunnel_info(skb, tun_info);
2384 tun_info->options_len ?
2385 ip_tunnel_info_opts(tun_info) : NULL,
2386 tun_info->options_len);
2387 if (err) 2386 if (err)
2388 return err; 2387 return err;
2389 nla_nest_end(skb, start); 2388 nla_nest_end(skb, start);
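
The flow_netlink.c hunks above replace the ovs_ct_state_supported() helper with a plain mask test against CT_SUPPORTED_MASK and additionally clamp generated ct_state masks to the same set in nlattr_set(). The fragment below is a minimal userspace sketch of that pattern; the flag values and helper names are made up for illustration and are not the OVS uapi constants.

#include <stdio.h>

#define CT_STATE_NEW         0x01	/* illustrative flag values, not the uapi ones */
#define CT_STATE_ESTABLISHED 0x02
#define CT_SUPPORTED_MASK    (CT_STATE_NEW | CT_STATE_ESTABLISHED)

/* Reject a key whose ct_state requests unsupported flags. */
static int validate_ct_state(unsigned int ct_state)
{
	if (ct_state & ~CT_SUPPORTED_MASK) {
		fprintf(stderr, "ct_state flags %08x unsupported\n", ct_state);
		return -1;
	}
	return 0;
}

/* Clamp a generated mask to the supported bits, as nlattr_set() now does. */
static unsigned int clamp_ct_mask(unsigned int mask)
{
	return mask & CT_SUPPORTED_MASK;
}

int main(void)
{
	printf("validate 0x01 -> %d\n", validate_ct_state(0x01));
	printf("validate 0x10 -> %d\n", validate_ct_state(0x10));
	printf("clamp 0xff -> %#x\n", clamp_ct_mask(0xff));
	return 0;
}
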
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 6ca3f0baf449..47dd142eca1c 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -55,9 +55,9 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
55int ovs_nla_get_match(struct net *, struct sw_flow_match *, 55int ovs_nla_get_match(struct net *, struct sw_flow_match *,
56 const struct nlattr *key, const struct nlattr *mask, 56 const struct nlattr *key, const struct nlattr *mask,
57 bool log); 57 bool log);
58int ovs_nla_put_egress_tunnel_key(struct sk_buff *, 58
59 const struct ip_tunnel_info *, 59int ovs_nla_put_tunnel_info(struct sk_buff *skb,
60 const void *egress_tun_opts); 60 struct ip_tunnel_info *tun_info);
61 61
62bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log); 62bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
63int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid, 63int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 2735e9c4a3b8..5f8aaaaa0785 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -52,18 +52,6 @@ static int geneve_get_options(const struct vport *vport,
52 return 0; 52 return 0;
53} 53}
54 54
55static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
56 struct dp_upcall_info *upcall)
57{
58 struct geneve_port *geneve_port = geneve_vport(vport);
59 struct net *net = ovs_dp_get_net(vport->dp);
60 __be16 dport = htons(geneve_port->port_no);
61 __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
62
63 return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
64 skb, IPPROTO_UDP, sport, dport);
65}
66
67static struct vport *geneve_tnl_create(const struct vport_parms *parms) 55static struct vport *geneve_tnl_create(const struct vport_parms *parms)
68{ 56{
69 struct net *net = ovs_dp_get_net(parms->dp); 57 struct net *net = ovs_dp_get_net(parms->dp);
@@ -130,7 +118,6 @@ static struct vport_ops ovs_geneve_vport_ops = {
130 .get_options = geneve_get_options, 118 .get_options = geneve_get_options,
131 .send = ovs_netdev_send, 119 .send = ovs_netdev_send,
132 .owner = THIS_MODULE, 120 .owner = THIS_MODULE,
133 .get_egress_tun_info = geneve_get_egress_tun_info,
134}; 121};
135 122
136static int __init ovs_geneve_tnl_init(void) 123static int __init ovs_geneve_tnl_init(void)
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 4d24481669c9..64225bf5eb40 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -84,18 +84,10 @@ static struct vport *gre_create(const struct vport_parms *parms)
84 return ovs_netdev_link(vport, parms->name); 84 return ovs_netdev_link(vport, parms->name);
85} 85}
86 86
87static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
88 struct dp_upcall_info *upcall)
89{
90 return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
91 skb, IPPROTO_GRE, 0, 0);
92}
93
94static struct vport_ops ovs_gre_vport_ops = { 87static struct vport_ops ovs_gre_vport_ops = {
95 .type = OVS_VPORT_TYPE_GRE, 88 .type = OVS_VPORT_TYPE_GRE,
96 .create = gre_create, 89 .create = gre_create,
97 .send = ovs_netdev_send, 90 .send = ovs_netdev_send,
98 .get_egress_tun_info = gre_get_egress_tun_info,
99 .destroy = ovs_netdev_tunnel_destroy, 91 .destroy = ovs_netdev_tunnel_destroy,
100 .owner = THIS_MODULE, 92 .owner = THIS_MODULE,
101}; 93};
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 388b8a6bf112..b3934126daa8 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -106,12 +106,45 @@ static void internal_dev_destructor(struct net_device *dev)
106 free_netdev(dev); 106 free_netdev(dev);
107} 107}
108 108
109static struct rtnl_link_stats64 *
110internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
111{
112 int i;
113
114 memset(stats, 0, sizeof(*stats));
115 stats->rx_errors = dev->stats.rx_errors;
116 stats->tx_errors = dev->stats.tx_errors;
117 stats->tx_dropped = dev->stats.tx_dropped;
118 stats->rx_dropped = dev->stats.rx_dropped;
119
120 for_each_possible_cpu(i) {
121 const struct pcpu_sw_netstats *percpu_stats;
122 struct pcpu_sw_netstats local_stats;
123 unsigned int start;
124
125 percpu_stats = per_cpu_ptr(dev->tstats, i);
126
127 do {
128 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
129 local_stats = *percpu_stats;
130 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
131
132 stats->rx_bytes += local_stats.rx_bytes;
133 stats->rx_packets += local_stats.rx_packets;
134 stats->tx_bytes += local_stats.tx_bytes;
135 stats->tx_packets += local_stats.tx_packets;
136 }
137
138 return stats;
139}
140
109static const struct net_device_ops internal_dev_netdev_ops = { 141static const struct net_device_ops internal_dev_netdev_ops = {
110 .ndo_open = internal_dev_open, 142 .ndo_open = internal_dev_open,
111 .ndo_stop = internal_dev_stop, 143 .ndo_stop = internal_dev_stop,
112 .ndo_start_xmit = internal_dev_xmit, 144 .ndo_start_xmit = internal_dev_xmit,
113 .ndo_set_mac_address = eth_mac_addr, 145 .ndo_set_mac_address = eth_mac_addr,
114 .ndo_change_mtu = internal_dev_change_mtu, 146 .ndo_change_mtu = internal_dev_change_mtu,
147 .ndo_get_stats64 = internal_get_stats,
115}; 148};
116 149
117static struct rtnl_link_ops internal_dev_link_ops __read_mostly = { 150static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -161,6 +194,11 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
161 err = -ENOMEM; 194 err = -ENOMEM;
162 goto error_free_vport; 195 goto error_free_vport;
163 } 196 }
197 vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
198 if (!vport->dev->tstats) {
199 err = -ENOMEM;
200 goto error_free_netdev;
201 }
164 202
165 dev_net_set(vport->dev, ovs_dp_get_net(vport->dp)); 203 dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
166 internal_dev = internal_dev_priv(vport->dev); 204 internal_dev = internal_dev_priv(vport->dev);
@@ -173,7 +211,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
173 rtnl_lock(); 211 rtnl_lock();
174 err = register_netdevice(vport->dev); 212 err = register_netdevice(vport->dev);
175 if (err) 213 if (err)
176 goto error_free_netdev; 214 goto error_unlock;
177 215
178 dev_set_promiscuity(vport->dev, 1); 216 dev_set_promiscuity(vport->dev, 1);
179 rtnl_unlock(); 217 rtnl_unlock();
@@ -181,8 +219,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
181 219
182 return vport; 220 return vport;
183 221
184error_free_netdev: 222error_unlock:
185 rtnl_unlock(); 223 rtnl_unlock();
224 free_percpu(vport->dev->tstats);
225error_free_netdev:
186 free_netdev(vport->dev); 226 free_netdev(vport->dev);
187error_free_vport: 227error_free_vport:
188 ovs_vport_free(vport); 228 ovs_vport_free(vport);
@@ -198,7 +238,7 @@ static void internal_dev_destroy(struct vport *vport)
198 238
199 /* unregister_netdevice() waits for an RCU grace period. */ 239 /* unregister_netdevice() waits for an RCU grace period. */
200 unregister_netdevice(vport->dev); 240 unregister_netdevice(vport->dev);
201 241 free_percpu(vport->dev->tstats);
202 rtnl_unlock(); 242 rtnl_unlock();
203} 243}
204 244
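
The internal_get_stats() callback added above sums the per-CPU pcpu_sw_netstats counters under the u64_stats fetch/retry loop and folds in the error and drop counters from dev->stats. The sketch below models only the read-retry idea in plain userspace C with a simple sequence counter; the struct layout, the fixed NR_CPUS and the absence of memory barriers are simplifications, not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct percpu_stats {
	unsigned int seq;		/* even: stable, odd: writer active */
	uint64_t rx_packets;
	uint64_t tx_packets;
};

static struct percpu_stats stats[NR_CPUS];

/* Reader: retry while a writer is active or an update raced with us. */
static void read_one(const struct percpu_stats *s, uint64_t *rx, uint64_t *tx)
{
	unsigned int start;

	do {
		start = s->seq;
		*rx = s->rx_packets;
		*tx = s->tx_packets;
	} while ((start & 1) || start != s->seq);
}

static void sum_stats(uint64_t *rx_total, uint64_t *tx_total)
{
	uint64_t rx, tx;
	int cpu;

	*rx_total = *tx_total = 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		read_one(&stats[cpu], &rx, &tx);
		*rx_total += rx;
		*tx_total += tx;
	}
}

int main(void)
{
	uint64_t rx, tx;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		stats[cpu].rx_packets = 10 * (cpu + 1);
		stats[cpu].tx_packets = 5 * (cpu + 1);
	}
	sum_stats(&rx, &tx);
	printf("rx=%llu tx=%llu\n",
	       (unsigned long long)rx, (unsigned long long)tx);
	return 0;
}
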
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index c11413d5075f..e1c9c0888037 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -146,31 +146,12 @@ static struct vport *vxlan_create(const struct vport_parms *parms)
146 return ovs_netdev_link(vport, parms->name); 146 return ovs_netdev_link(vport, parms->name);
147} 147}
148 148
149static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
150 struct dp_upcall_info *upcall)
151{
152 struct vxlan_dev *vxlan = netdev_priv(vport->dev);
153 struct net *net = ovs_dp_get_net(vport->dp);
154 __be16 dst_port = vxlan_dev_dst_port(vxlan);
155 __be16 src_port;
156 int port_min;
157 int port_max;
158
159 inet_get_local_port_range(net, &port_min, &port_max);
160 src_port = udp_flow_src_port(net, skb, 0, 0, true);
161
162 return ovs_tunnel_get_egress_info(upcall, net,
163 skb, IPPROTO_UDP,
164 src_port, dst_port);
165}
166
167static struct vport_ops ovs_vxlan_netdev_vport_ops = { 149static struct vport_ops ovs_vxlan_netdev_vport_ops = {
168 .type = OVS_VPORT_TYPE_VXLAN, 150 .type = OVS_VPORT_TYPE_VXLAN,
169 .create = vxlan_create, 151 .create = vxlan_create,
170 .destroy = ovs_netdev_tunnel_destroy, 152 .destroy = ovs_netdev_tunnel_destroy,
171 .get_options = vxlan_get_options, 153 .get_options = vxlan_get_options,
172 .send = ovs_netdev_send, 154 .send = ovs_netdev_send,
173 .get_egress_tun_info = vxlan_get_egress_tun_info,
174}; 155};
175 156
176static int __init ovs_vxlan_tnl_init(void) 157static int __init ovs_vxlan_tnl_init(void)
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 12a36ac21eda..320c765ce44a 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -479,61 +479,3 @@ void ovs_vport_deferred_free(struct vport *vport)
479 call_rcu(&vport->rcu, free_vport_rcu); 479 call_rcu(&vport->rcu, free_vport_rcu);
480} 480}
481EXPORT_SYMBOL_GPL(ovs_vport_deferred_free); 481EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
482
483int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
484 struct net *net,
485 struct sk_buff *skb,
486 u8 ipproto,
487 __be16 tp_src,
488 __be16 tp_dst)
489{
490 struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
491 const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
492 const struct ip_tunnel_key *tun_key;
493 u32 skb_mark = skb->mark;
494 struct rtable *rt;
495 struct flowi4 fl;
496
497 if (unlikely(!tun_info))
498 return -EINVAL;
499 if (ip_tunnel_info_af(tun_info) != AF_INET)
500 return -EINVAL;
501
502 tun_key = &tun_info->key;
503
504 /* Route lookup to get source IP address.
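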
505 * The process may need to be changed if the corresponding process
506 * in vports ops changed.
507 */
508 rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
509 if (IS_ERR(rt))
510 return PTR_ERR(rt);
511
512 ip_rt_put(rt);
513
514 /* Generate egress_tun_info based on tun_info,
515 * saddr, tp_src and tp_dst
516 */
517 ip_tunnel_key_init(&egress_tun_info->key,
518 fl.saddr, tun_key->u.ipv4.dst,
519 tun_key->tos,
520 tun_key->ttl,
521 tp_src, tp_dst,
522 tun_key->tun_id,
523 tun_key->tun_flags);
524 egress_tun_info->options_len = tun_info->options_len;
525 egress_tun_info->mode = tun_info->mode;
526 upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
527 return 0;
528}
529EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
530
531int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
532 struct dp_upcall_info *upcall)
533{
534 /* get_egress_tun_info() is only implemented on tunnel ports. */
535 if (unlikely(!vport->ops->get_egress_tun_info))
536 return -EINVAL;
537
538 return vport->ops->get_egress_tun_info(vport, skb, upcall);
539}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index a413f3ae6a7b..d341ad6f3afe 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -27,7 +27,6 @@
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/u64_stats_sync.h> 29#include <linux/u64_stats_sync.h>
30#include <net/route.h>
31 30
32#include "datapath.h" 31#include "datapath.h"
33 32
@@ -53,16 +52,6 @@ int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
53int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *); 52int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
54u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *); 53u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
55 54
56int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
57 struct net *net,
58 struct sk_buff *,
59 u8 ipproto,
60 __be16 tp_src,
61 __be16 tp_dst);
62
63int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
64 struct dp_upcall_info *upcall);
65
66/** 55/**
67 * struct vport_portids - array of netlink portids of a vport. 56 * struct vport_portids - array of netlink portids of a vport.
68 * must be protected by rcu. 57 * must be protected by rcu.
@@ -140,8 +129,6 @@ struct vport_parms {
140 * have any configuration. 129 * have any configuration.
141 * @send: Send a packet on the device. 130 * @send: Send a packet on the device.
142 * zero for dropped packets or negative for error. 131 * zero for dropped packets or negative for error.
143 * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
144 * a packet.
145 */ 132 */
146struct vport_ops { 133struct vport_ops {
147 enum ovs_vport_type type; 134 enum ovs_vport_type type;
@@ -154,9 +141,6 @@ struct vport_ops {
154 int (*get_options)(const struct vport *, struct sk_buff *); 141 int (*get_options)(const struct vport *, struct sk_buff *);
155 142
156 void (*send)(struct vport *, struct sk_buff *); 143 void (*send)(struct vport *, struct sk_buff *);
157 int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
158 struct dp_upcall_info *upcall);
159
160 struct module *owner; 144 struct module *owner;
161 struct list_head list; 145 struct list_head list;
162}; 146};
@@ -215,25 +199,6 @@ static inline const char *ovs_vport_name(struct vport *vport)
215int ovs_vport_ops_register(struct vport_ops *ops); 199int ovs_vport_ops_register(struct vport_ops *ops);
216void ovs_vport_ops_unregister(struct vport_ops *ops); 200void ovs_vport_ops_unregister(struct vport_ops *ops);
217 201
218static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
219 const struct ip_tunnel_key *key,
220 u32 mark,
221 struct flowi4 *fl,
222 u8 protocol)
223{
224 struct rtable *rt;
225
226 memset(fl, 0, sizeof(*fl));
227 fl->daddr = key->u.ipv4.dst;
228 fl->saddr = key->u.ipv4.src;
229 fl->flowi4_tos = RT_TOS(key->tos);
230 fl->flowi4_mark = mark;
231 fl->flowi4_proto = protocol;
232
233 rt = ip_route_output_key(net, fl);
234 return rt;
235}
236
237static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb) 202static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
238{ 203{
239 vport->ops->send(vport, skb); 204 vport->ops->send(vport, skb);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index fbc5ef88bc0e..27a992154804 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -214,8 +214,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
214 } 214 }
215 215
216 to_copy = min(tc->t_tinc_data_rem, left); 216 to_copy = min(tc->t_tinc_data_rem, left);
217 pskb_pull(clone, offset); 217 if (!pskb_pull(clone, offset) ||
218 pskb_trim(clone, to_copy); 218 pskb_trim(clone, to_copy)) {
219 pr_warn("rds_tcp_data_recv: pull/trim failed "
220 "left %zu data_rem %zu skb_len %d\n",
221 left, tc->t_tinc_data_rem, skb->len);
222 kfree_skb(clone);
223 desc->error = -ENOMEM;
224 goto out;
225 }
219 skb_queue_tail(&tinc->ti_skb_list, clone); 226 skb_queue_tail(&tinc->ti_skb_list, clone);
220 227
221 rdsdebug("skb %p data %p len %d off %u to_copy %zu -> " 228 rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index e7000be321b0..ed98c1fc3de1 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -94,10 +94,14 @@ __init int net_sysctl_init(void)
94 goto out; 94 goto out;
95 ret = register_pernet_subsys(&sysctl_pernet_ops); 95 ret = register_pernet_subsys(&sysctl_pernet_ops);
96 if (ret) 96 if (ret)
97 goto out; 97 goto out1;
98 register_sysctl_root(&net_sysctl_root); 98 register_sysctl_root(&net_sysctl_root);
99out: 99out:
100 return ret; 100 return ret;
101out1:
102 unregister_sysctl_table(net_header);
103 net_header = NULL;
104 goto out;
101} 105}
102 106
103struct ctl_table_header *register_net_sysctl(struct net *net, 107struct ctl_table_header *register_net_sysctl(struct net *net,
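
The net_sysctl_init() fix above stops leaking the registered table header: when register_pernet_subsys() fails, the new out1 label unregisters net_header and clears the pointer before returning. Below is a minimal standalone sketch of that goto-based unwinding; the setup and teardown helper names are invented for the example.

#include <stdio.h>

static int registered_a;

static int setup_a(void)
{
	registered_a = 1;
	return 0;
}

static void teardown_a(void)
{
	registered_a = 0;
}

static int setup_b(void)
{
	return -1;		/* simulate the second step failing */
}

static int init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;
	ret = setup_b();
	if (ret)
		goto undo_a;	/* roll back step A, as the sysctl fix does */
out:
	return ret;
undo_a:
	teardown_a();
	goto out;
}

int main(void)
{
	int ret = init();

	printf("init() = %d, registered_a = %d\n", ret, registered_a);
	return 0;
}
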
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 41042de3ae9b..eadba62afa85 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -42,7 +42,8 @@
42#include "core.h" 42#include "core.h"
43 43
44#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ 44#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
45#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */ 45#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
46#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
46 47
47const char tipc_bclink_name[] = "broadcast-link"; 48const char tipc_bclink_name[] = "broadcast-link";
48 49
@@ -908,9 +909,10 @@ int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
908 909
909 if (!bcl) 910 if (!bcl)
910 return -ENOPROTOOPT; 911 return -ENOPROTOOPT;
911 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN)) 912 if (limit < BCLINK_WIN_MIN)
913 limit = BCLINK_WIN_MIN;
914 if (limit > TIPC_MAX_LINK_WIN)
912 return -EINVAL; 915 return -EINVAL;
913
914 tipc_bclink_lock(net); 916 tipc_bclink_lock(net);
915 tipc_link_set_queue_limits(bcl, limit); 917 tipc_link_set_queue_limits(bcl, limit);
916 tipc_bclink_unlock(net); 918 tipc_bclink_unlock(net);
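
tipc_bclink_set_queue_limits() above now raises undersized requests to BCLINK_WIN_MIN instead of rejecting them, while oversized requests still fail with -EINVAL. A small sketch of that validation follows; the TIPC_MAX_LINK_WIN value is an assumption for the sketch, since the constant is defined elsewhere in the tree.

#include <errno.h>
#include <stdio.h>

#define BCLINK_WIN_MIN    32
#define TIPC_MAX_LINK_WIN 8191	/* assumed value for the sketch */

static int set_queue_limit(unsigned int limit, unsigned int *win)
{
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;	/* clamp low values instead of -EINVAL */
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;		/* still reject oversized windows */
	*win = limit;
	return 0;
}

int main(void)
{
	unsigned int win = 0;

	printf("limit 10     -> %d, win=%u\n", set_queue_limit(10, &win), win);
	printf("limit 100000 -> %d, win=%u\n", set_queue_limit(100000, &win), win);
	return 0;
}
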
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index c5ac436235e0..5f73450159df 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -121,7 +121,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
121{ 121{
122 struct sk_buff *head = *headbuf; 122 struct sk_buff *head = *headbuf;
123 struct sk_buff *frag = *buf; 123 struct sk_buff *frag = *buf;
124 struct sk_buff *tail; 124 struct sk_buff *tail = NULL;
125 struct tipc_msg *msg; 125 struct tipc_msg *msg;
126 u32 fragid; 126 u32 fragid;
127 int delta; 127 int delta;
@@ -141,9 +141,15 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
141 if (unlikely(skb_unclone(frag, GFP_ATOMIC))) 141 if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
142 goto err; 142 goto err;
143 head = *headbuf = frag; 143 head = *headbuf = frag;
144 skb_frag_list_init(head);
145 TIPC_SKB_CB(head)->tail = NULL;
146 *buf = NULL; 144 *buf = NULL;
145 TIPC_SKB_CB(head)->tail = NULL;
146 if (skb_is_nonlinear(head)) {
147 skb_walk_frags(head, tail) {
148 TIPC_SKB_CB(head)->tail = tail;
149 }
150 } else {
151 skb_frag_list_init(head);
152 }
147 return 0; 153 return 0;
148 } 154 }
149 155
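
The tipc_buf_append() fix above no longer wipes the fragment list when the incoming fragment becomes the new head; if the head is nonlinear it walks the existing frags to re-derive the cached tail pointer. The sketch below shows the same recover-the-tail walk on an ordinary singly linked list standing in for the skb frag list; the types are illustrative.

#include <stddef.h>
#include <stdio.h>

struct frag {
	int id;
	struct frag *next;
};

struct head {
	struct frag *frag_list;	/* may already hold fragments */
	struct frag *tail;	/* cached tail, must stay in sync */
};

static void fixup_tail(struct head *h)
{
	struct frag *f;

	h->tail = NULL;
	for (f = h->frag_list; f; f = f->next)	/* walk to the last frag */
		h->tail = f;
}

int main(void)
{
	struct frag c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct head h = { &a, NULL };

	fixup_tail(&h);
	printf("tail id = %d\n", h.tail ? h.tail->id : -1);
	return 0;
}
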
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index c170d3138953..6e648d90297a 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,6 +52,8 @@
52/* IANA assigned UDP port */ 52/* IANA assigned UDP port */
53#define UDP_PORT_DEFAULT 6118 53#define UDP_PORT_DEFAULT 6118
54 54
55#define UDP_MIN_HEADROOM 28
56
55static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = { 57static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
56 [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC}, 58 [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
57 [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY, 59 [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
@@ -156,6 +158,9 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
156 struct sk_buff *clone; 158 struct sk_buff *clone;
157 struct rtable *rt; 159 struct rtable *rt;
158 160
161 if (skb_headroom(skb) < UDP_MIN_HEADROOM)
162 pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
163
159 clone = skb_clone(skb, GFP_ATOMIC); 164 clone = skb_clone(skb, GFP_ATOMIC);
160 skb_set_inner_protocol(clone, htons(ETH_P_TIPC)); 165 skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
161 ub = rcu_dereference_rtnl(b->media_ptr); 166 ub = rcu_dereference_rtnl(b->media_ptr);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index df5fc6b340f1..00e8a349aabc 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1948,13 +1948,13 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
1948 err = misc_register(&vsock_device); 1948 err = misc_register(&vsock_device);
1949 if (err) { 1949 if (err) {
1950 pr_err("Failed to register misc device\n"); 1950 pr_err("Failed to register misc device\n");
1951 return -ENOENT; 1951 goto err_reset_transport;
1952 } 1952 }
1953 1953
1954 err = proto_register(&vsock_proto, 1); /* we want our slab */ 1954 err = proto_register(&vsock_proto, 1); /* we want our slab */
1955 if (err) { 1955 if (err) {
1956 pr_err("Cannot register vsock protocol\n"); 1956 pr_err("Cannot register vsock protocol\n");
1957 goto err_misc_deregister; 1957 goto err_deregister_misc;
1958 } 1958 }
1959 1959
1960 err = sock_register(&vsock_family_ops); 1960 err = sock_register(&vsock_family_ops);
@@ -1969,8 +1969,9 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
1969 1969
1970err_unregister_proto: 1970err_unregister_proto:
1971 proto_unregister(&vsock_proto); 1971 proto_unregister(&vsock_proto);
1972err_misc_deregister: 1972err_deregister_misc:
1973 misc_deregister(&vsock_device); 1973 misc_deregister(&vsock_device);
1974err_reset_transport:
1974 transport = NULL; 1975 transport = NULL;
1975err_busy: 1976err_busy:
1976 mutex_unlock(&vsock_register_mutex); 1977 mutex_unlock(&vsock_register_mutex);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 1f63daff3965..7555cad83a75 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -40,13 +40,11 @@
40 40
41static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg); 41static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
42static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg); 42static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
43static void vmci_transport_peer_attach_cb(u32 sub_id,
44 const struct vmci_event_data *ed,
45 void *client_data);
46static void vmci_transport_peer_detach_cb(u32 sub_id, 43static void vmci_transport_peer_detach_cb(u32 sub_id,
47 const struct vmci_event_data *ed, 44 const struct vmci_event_data *ed,
48 void *client_data); 45 void *client_data);
49static void vmci_transport_recv_pkt_work(struct work_struct *work); 46static void vmci_transport_recv_pkt_work(struct work_struct *work);
47static void vmci_transport_cleanup(struct work_struct *work);
50static int vmci_transport_recv_listen(struct sock *sk, 48static int vmci_transport_recv_listen(struct sock *sk,
51 struct vmci_transport_packet *pkt); 49 struct vmci_transport_packet *pkt);
52static int vmci_transport_recv_connecting_server( 50static int vmci_transport_recv_connecting_server(
@@ -75,6 +73,10 @@ struct vmci_transport_recv_pkt_info {
75 struct vmci_transport_packet pkt; 73 struct vmci_transport_packet pkt;
76}; 74};
77 75
76static LIST_HEAD(vmci_transport_cleanup_list);
77static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
78static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);
79
78static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID, 80static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
79 VMCI_INVALID_ID }; 81 VMCI_INVALID_ID };
80static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; 82static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
@@ -791,44 +793,6 @@ out:
791 return err; 793 return err;
792} 794}
793 795
794static void vmci_transport_peer_attach_cb(u32 sub_id,
795 const struct vmci_event_data *e_data,
796 void *client_data)
797{
798 struct sock *sk = client_data;
799 const struct vmci_event_payload_qp *e_payload;
800 struct vsock_sock *vsk;
801
802 e_payload = vmci_event_data_const_payload(e_data);
803
804 vsk = vsock_sk(sk);
805
806 /* We don't ask for delayed CBs when we subscribe to this event (we
807 * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
808 * guarantees in that case about what context we might be running in,
809 * so it could be BH or process, blockable or non-blockable. So we
810 * need to account for all possible contexts here.
811 */
812 local_bh_disable();
813 bh_lock_sock(sk);
814
815 /* XXX This is lame, we should provide a way to lookup sockets by
816 * qp_handle.
817 */
818 if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
819 e_payload->handle)) {
820 /* XXX This doesn't do anything, but in the future we may want
821 * to set a flag here to verify the attach really did occur and
822 * we weren't just sent a datagram claiming it was.
823 */
824 goto out;
825 }
826
827out:
828 bh_unlock_sock(sk);
829 local_bh_enable();
830}
831
832static void vmci_transport_handle_detach(struct sock *sk) 796static void vmci_transport_handle_detach(struct sock *sk)
833{ 797{
834 struct vsock_sock *vsk; 798 struct vsock_sock *vsk;
@@ -871,28 +835,38 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
871 const struct vmci_event_data *e_data, 835 const struct vmci_event_data *e_data,
872 void *client_data) 836 void *client_data)
873{ 837{
874 struct sock *sk = client_data; 838 struct vmci_transport *trans = client_data;
875 const struct vmci_event_payload_qp *e_payload; 839 const struct vmci_event_payload_qp *e_payload;
876 struct vsock_sock *vsk;
877 840
878 e_payload = vmci_event_data_const_payload(e_data); 841 e_payload = vmci_event_data_const_payload(e_data);
879 vsk = vsock_sk(sk);
880 if (vmci_handle_is_invalid(e_payload->handle))
881 return;
882
883 /* Same rules for locking as for peer_attach_cb(). */
884 local_bh_disable();
885 bh_lock_sock(sk);
886 842
887 /* XXX This is lame, we should provide a way to lookup sockets by 843 /* XXX This is lame, we should provide a way to lookup sockets by
888 * qp_handle. 844 * qp_handle.
889 */ 845 */
890 if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle, 846 if (vmci_handle_is_invalid(e_payload->handle) ||
891 e_payload->handle)) 847 vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
892 vmci_transport_handle_detach(sk); 848 return;
893 849
894 bh_unlock_sock(sk); 850 /* We don't ask for delayed CBs when we subscribe to this event (we
895 local_bh_enable(); 851 * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
852 * guarantees in that case about what context we might be running in,
853 * so it could be BH or process, blockable or non-blockable. So we
854 * need to account for all possible contexts here.
855 */
856 spin_lock_bh(&trans->lock);
857 if (!trans->sk)
858 goto out;
859
860 /* Apart from here, trans->lock is only grabbed as part of sk destruct,
861 * where trans->sk isn't locked.
862 */
863 bh_lock_sock(trans->sk);
864
865 vmci_transport_handle_detach(trans->sk);
866
867 bh_unlock_sock(trans->sk);
868 out:
869 spin_unlock_bh(&trans->lock);
896} 870}
897 871
898static void vmci_transport_qp_resumed_cb(u32 sub_id, 872static void vmci_transport_qp_resumed_cb(u32 sub_id,
@@ -1181,7 +1155,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
1181 */ 1155 */
1182 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH, 1156 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1183 vmci_transport_peer_detach_cb, 1157 vmci_transport_peer_detach_cb,
1184 pending, &detach_sub_id); 1158 vmci_trans(vpending), &detach_sub_id);
1185 if (err < VMCI_SUCCESS) { 1159 if (err < VMCI_SUCCESS) {
1186 vmci_transport_send_reset(pending, pkt); 1160 vmci_transport_send_reset(pending, pkt);
1187 err = vmci_transport_error_to_vsock_error(err); 1161 err = vmci_transport_error_to_vsock_error(err);
@@ -1321,7 +1295,6 @@ vmci_transport_recv_connecting_client(struct sock *sk,
1321 || vmci_trans(vsk)->qpair 1295 || vmci_trans(vsk)->qpair
1322 || vmci_trans(vsk)->produce_size != 0 1296 || vmci_trans(vsk)->produce_size != 0
1323 || vmci_trans(vsk)->consume_size != 0 1297 || vmci_trans(vsk)->consume_size != 0
1324 || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
1325 || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) { 1298 || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
1326 skerr = EPROTO; 1299 skerr = EPROTO;
1327 err = -EINVAL; 1300 err = -EINVAL;
@@ -1389,7 +1362,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
1389 struct vsock_sock *vsk; 1362 struct vsock_sock *vsk;
1390 struct vmci_handle handle; 1363 struct vmci_handle handle;
1391 struct vmci_qp *qpair; 1364 struct vmci_qp *qpair;
1392 u32 attach_sub_id;
1393 u32 detach_sub_id; 1365 u32 detach_sub_id;
1394 bool is_local; 1366 bool is_local;
1395 u32 flags; 1367 u32 flags;
@@ -1399,7 +1371,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
1399 1371
1400 vsk = vsock_sk(sk); 1372 vsk = vsock_sk(sk);
1401 handle = VMCI_INVALID_HANDLE; 1373 handle = VMCI_INVALID_HANDLE;
1402 attach_sub_id = VMCI_INVALID_ID;
1403 detach_sub_id = VMCI_INVALID_ID; 1374 detach_sub_id = VMCI_INVALID_ID;
1404 1375
1405 /* If we have gotten here then we should be past the point where old 1376 /* If we have gotten here then we should be past the point where old
@@ -1444,23 +1415,15 @@ static int vmci_transport_recv_connecting_client_negotiate(
1444 goto destroy; 1415 goto destroy;
1445 } 1416 }
1446 1417
1447 /* Subscribe to attach and detach events first. 1418 /* Subscribe to detach events first.
1448 * 1419 *
1449 * XXX We attach once for each queue pair created for now so it is easy 1420 * XXX We attach once for each queue pair created for now so it is easy
1450 * to find the socket (it's provided), but later we should only 1421 * to find the socket (it's provided), but later we should only
1451 * subscribe once and add a way to lookup sockets by queue pair handle. 1422 * subscribe once and add a way to lookup sockets by queue pair handle.
1452 */ 1423 */
1453 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
1454 vmci_transport_peer_attach_cb,
1455 sk, &attach_sub_id);
1456 if (err < VMCI_SUCCESS) {
1457 err = vmci_transport_error_to_vsock_error(err);
1458 goto destroy;
1459 }
1460
1461 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH, 1424 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1462 vmci_transport_peer_detach_cb, 1425 vmci_transport_peer_detach_cb,
1463 sk, &detach_sub_id); 1426 vmci_trans(vsk), &detach_sub_id);
1464 if (err < VMCI_SUCCESS) { 1427 if (err < VMCI_SUCCESS) {
1465 err = vmci_transport_error_to_vsock_error(err); 1428 err = vmci_transport_error_to_vsock_error(err);
1466 goto destroy; 1429 goto destroy;
@@ -1496,7 +1459,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
1496 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 1459 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
1497 pkt->u.size; 1460 pkt->u.size;
1498 1461
1499 vmci_trans(vsk)->attach_sub_id = attach_sub_id;
1500 vmci_trans(vsk)->detach_sub_id = detach_sub_id; 1462 vmci_trans(vsk)->detach_sub_id = detach_sub_id;
1501 1463
1502 vmci_trans(vsk)->notify_ops->process_negotiate(sk); 1464 vmci_trans(vsk)->notify_ops->process_negotiate(sk);
@@ -1504,9 +1466,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
1504 return 0; 1466 return 0;
1505 1467
1506destroy: 1468destroy:
1507 if (attach_sub_id != VMCI_INVALID_ID)
1508 vmci_event_unsubscribe(attach_sub_id);
1509
1510 if (detach_sub_id != VMCI_INVALID_ID) 1469 if (detach_sub_id != VMCI_INVALID_ID)
1511 vmci_event_unsubscribe(detach_sub_id); 1470 vmci_event_unsubscribe(detach_sub_id);
1512 1471
@@ -1607,9 +1566,11 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
1607 vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE; 1566 vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
1608 vmci_trans(vsk)->qpair = NULL; 1567 vmci_trans(vsk)->qpair = NULL;
1609 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0; 1568 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
1610 vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id = 1569 vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
1611 VMCI_INVALID_ID;
1612 vmci_trans(vsk)->notify_ops = NULL; 1570 vmci_trans(vsk)->notify_ops = NULL;
1571 INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
1572 vmci_trans(vsk)->sk = &vsk->sk;
1573 spin_lock_init(&vmci_trans(vsk)->lock);
1613 if (psk) { 1574 if (psk) {
1614 vmci_trans(vsk)->queue_pair_size = 1575 vmci_trans(vsk)->queue_pair_size =
1615 vmci_trans(psk)->queue_pair_size; 1576 vmci_trans(psk)->queue_pair_size;
@@ -1629,29 +1590,57 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
1629 return 0; 1590 return 0;
1630} 1591}
1631 1592
1632static void vmci_transport_destruct(struct vsock_sock *vsk) 1593static void vmci_transport_free_resources(struct list_head *transport_list)
1633{ 1594{
1634 if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) { 1595 while (!list_empty(transport_list)) {
1635 vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id); 1596 struct vmci_transport *transport =
1636 vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID; 1597 list_first_entry(transport_list, struct vmci_transport,
1637 } 1598 elem);
1599 list_del(&transport->elem);
1638 1600
1639 if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) { 1601 if (transport->detach_sub_id != VMCI_INVALID_ID) {
1640 vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id); 1602 vmci_event_unsubscribe(transport->detach_sub_id);
1641 vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID; 1603 transport->detach_sub_id = VMCI_INVALID_ID;
1642 } 1604 }
1643 1605
1644 if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) { 1606 if (!vmci_handle_is_invalid(transport->qp_handle)) {
1645 vmci_qpair_detach(&vmci_trans(vsk)->qpair); 1607 vmci_qpair_detach(&transport->qpair);
1646 vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE; 1608 transport->qp_handle = VMCI_INVALID_HANDLE;
1647 vmci_trans(vsk)->produce_size = 0; 1609 transport->produce_size = 0;
1648 vmci_trans(vsk)->consume_size = 0; 1610 transport->consume_size = 0;
1611 }
1612
1613 kfree(transport);
1649 } 1614 }
1615}
1616
1617static void vmci_transport_cleanup(struct work_struct *work)
1618{
1619 LIST_HEAD(pending);
1620
1621 spin_lock_bh(&vmci_transport_cleanup_lock);
1622 list_replace_init(&vmci_transport_cleanup_list, &pending);
1623 spin_unlock_bh(&vmci_transport_cleanup_lock);
1624 vmci_transport_free_resources(&pending);
1625}
1626
1627static void vmci_transport_destruct(struct vsock_sock *vsk)
1628{
1629 /* Ensure that the detach callback doesn't use the sk/vsk
1630 * we are about to destruct.
1631 */
1632 spin_lock_bh(&vmci_trans(vsk)->lock);
1633 vmci_trans(vsk)->sk = NULL;
1634 spin_unlock_bh(&vmci_trans(vsk)->lock);
1650 1635
1651 if (vmci_trans(vsk)->notify_ops) 1636 if (vmci_trans(vsk)->notify_ops)
1652 vmci_trans(vsk)->notify_ops->socket_destruct(vsk); 1637 vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
1653 1638
1654 kfree(vsk->trans); 1639 spin_lock_bh(&vmci_transport_cleanup_lock);
1640 list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
1641 spin_unlock_bh(&vmci_transport_cleanup_lock);
1642 schedule_work(&vmci_transport_cleanup_work);
1643
1655 vsk->trans = NULL; 1644 vsk->trans = NULL;
1656} 1645}
1657 1646
@@ -2146,6 +2135,9 @@ module_init(vmci_transport_init);
2146 2135
2147static void __exit vmci_transport_exit(void) 2136static void __exit vmci_transport_exit(void)
2148{ 2137{
2138 cancel_work_sync(&vmci_transport_cleanup_work);
2139 vmci_transport_free_resources(&vmci_transport_cleanup_list);
2140
2149 if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) { 2141 if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
2150 if (vmci_datagram_destroy_handle( 2142 if (vmci_datagram_destroy_handle(
2151 vmci_transport_stream_handle) != VMCI_SUCCESS) 2143 vmci_transport_stream_handle) != VMCI_SUCCESS)
@@ -2164,6 +2156,7 @@ module_exit(vmci_transport_exit);
2164 2156
2165MODULE_AUTHOR("VMware, Inc."); 2157MODULE_AUTHOR("VMware, Inc.");
2166MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2158MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2159MODULE_VERSION("1.0.2.0-k");
2167MODULE_LICENSE("GPL v2"); 2160MODULE_LICENSE("GPL v2");
2168MODULE_ALIAS("vmware_vsock"); 2161MODULE_ALIAS("vmware_vsock");
2169MODULE_ALIAS_NETPROTO(PF_VSOCK); 2162MODULE_ALIAS_NETPROTO(PF_VSOCK);
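
The vmci_transport rework above decouples socket destruction from resource teardown: vmci_transport_destruct() clears trans->sk under the new spinlock, queues the transport on vmci_transport_cleanup_list, and a work item later unsubscribes the detach event and detaches the queue pair. The following is a minimal, single-threaded sketch of that defer-and-drain shape; the real code's list primitives, locking and workqueue are intentionally left out and the struct is illustrative.

#include <stdio.h>
#include <stdlib.h>

struct transport {
	int id;
	struct transport *next;	/* link on the cleanup list */
};

static struct transport *cleanup_list;

/* Called from the socket destructor: only defer the actual free. */
static void transport_destruct(struct transport *t)
{
	t->next = cleanup_list;
	cleanup_list = t;
}

/* Called later (from a work item in the real code): release everything. */
static void transport_cleanup(void)
{
	while (cleanup_list) {
		struct transport *t = cleanup_list;

		cleanup_list = t->next;
		printf("freeing transport %d\n", t->id);
		free(t);	/* would also unsubscribe events, detach the qpair */
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct transport *t = malloc(sizeof(*t));

		if (!t)
			return 1;
		t->id = i;
		transport_destruct(t);
	}
	transport_cleanup();
	return 0;
}
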
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index ce6c9623d5f0..2ad46f39649f 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -119,10 +119,12 @@ struct vmci_transport {
119 u64 queue_pair_size; 119 u64 queue_pair_size;
120 u64 queue_pair_min_size; 120 u64 queue_pair_min_size;
121 u64 queue_pair_max_size; 121 u64 queue_pair_max_size;
122 u32 attach_sub_id;
123 u32 detach_sub_id; 122 u32 detach_sub_id;
124 union vmci_transport_notify notify; 123 union vmci_transport_notify notify;
125 struct vmci_transport_notify_ops *notify_ops; 124 struct vmci_transport_notify_ops *notify_ops;
125 struct list_head elem;
126 struct sock *sk;
127 spinlock_t lock; /* protects sk. */
126}; 128};
127 129
128int vmci_transport_register(void); 130int vmci_transport_register(void);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index a8de9e300200..24e06a2377f6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1928,8 +1928,10 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1928 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 1928 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1929 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; 1929 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
1930 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 1930 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1931 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
1932 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
1931 1933
1932 if (!lt && !rp && !re) 1934 if (!lt && !rp && !re && !et && !rt)
1933 return err; 1935 return err;
1934 1936
1935 /* pedantic mode - thou shalt sayeth replaceth */ 1937 /* pedantic mode - thou shalt sayeth replaceth */
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 3a44d3a272af..af44e564d6dd 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -86,5 +86,17 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
86#define PT_REGS_RC(x) ((x)->gprs[2]) 86#define PT_REGS_RC(x) ((x)->gprs[2])
87#define PT_REGS_SP(x) ((x)->gprs[15]) 87#define PT_REGS_SP(x) ((x)->gprs[15])
88 88
89#elif defined(__aarch64__)
90
91#define PT_REGS_PARM1(x) ((x)->regs[0])
92#define PT_REGS_PARM2(x) ((x)->regs[1])
93#define PT_REGS_PARM3(x) ((x)->regs[2])
94#define PT_REGS_PARM4(x) ((x)->regs[3])
95#define PT_REGS_PARM5(x) ((x)->regs[4])
96#define PT_REGS_RET(x) ((x)->regs[30])
97#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
98#define PT_REGS_RC(x) ((x)->regs[0])
99#define PT_REGS_SP(x) ((x)->sp)
100
89#endif 101#endif
90#endif 102#endif
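
The bpf_helpers.h addition defines the aarch64 accessors in terms of the general register file: arguments in regs[0]..regs[4], the return value in regs[0], the link register in regs[30], the frame pointer in regs[29] and the stack pointer in sp. The sketch below exercises a subset of the same macros against a mock pt_regs so the mapping is visible; the mock layout is an assumption for the demo, while the real struct comes from the kernel's asm/ptrace.h and also carries pc and pstate.

#include <stdio.h>

struct pt_regs {			/* mock layout for the sketch */
	unsigned long regs[31];
	unsigned long sp;
};

#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
#define PT_REGS_FP(x)    ((x)->regs[29])
#define PT_REGS_RC(x)    ((x)->regs[0])
#define PT_REGS_SP(x)    ((x)->sp)

int main(void)
{
	struct pt_regs regs = { .regs = { 11, 22 }, .sp = 0x1000 };

	printf("parm1=%lu parm2=%lu fp=%lu sp=%#lx\n",
	       PT_REGS_PARM1(&regs), PT_REGS_PARM2(&regs),
	       PT_REGS_FP(&regs), PT_REGS_SP(&regs));
	return 0;
}
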
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 4449d1a99089..2433f7c81472 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/io.h>
22#include <sound/hdaudio_ext.h> 23#include <sound/hdaudio_ext.h>
23 24
24MODULE_DESCRIPTION("HDA extended core"); 25MODULE_DESCRIPTION("HDA extended core");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 37f43a1b34ef..a249d5486889 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3367,10 +3367,8 @@ int snd_hda_codec_build_pcms(struct hda_codec *codec)
3367 int dev, err; 3367 int dev, err;
3368 3368
3369 err = snd_hda_codec_parse_pcms(codec); 3369 err = snd_hda_codec_parse_pcms(codec);
3370 if (err < 0) { 3370 if (err < 0)
3371 snd_hda_codec_reset(codec);
3372 return err; 3371 return err;
3373 }
3374 3372
3375 /* attach a new PCM streams */ 3373 /* attach a new PCM streams */
3376 list_for_each_entry(cpcm, &codec->pcm_list_head, list) { 3374 list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ca03c40609fc..2f0ec7c45fc7 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -819,6 +819,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
819 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), 819 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
820 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), 820 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
821 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD), 821 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
822 SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
822 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), 823 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
823 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), 824 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
824 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), 825 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 3c2f0f8d6266..f823eb502367 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -50,24 +50,24 @@ struct rt298_priv {
50}; 50};
51 51
52static struct reg_default rt298_index_def[] = { 52static struct reg_default rt298_index_def[] = {
53 { 0x01, 0xaaaa }, 53 { 0x01, 0xa5a8 },
54 { 0x02, 0x8aaa }, 54 { 0x02, 0x8e95 },
55 { 0x03, 0x0002 }, 55 { 0x03, 0x0002 },
56 { 0x04, 0xaf01 }, 56 { 0x04, 0xaf67 },
57 { 0x08, 0x000d }, 57 { 0x08, 0x200f },
58 { 0x09, 0xd810 }, 58 { 0x09, 0xd010 },
59 { 0x0a, 0x0120 }, 59 { 0x0a, 0x0100 },
60 { 0x0b, 0x0000 }, 60 { 0x0b, 0x0000 },
61 { 0x0d, 0x2800 }, 61 { 0x0d, 0x2800 },
62 { 0x0f, 0x0000 }, 62 { 0x0f, 0x0022 },
63 { 0x19, 0x0a17 }, 63 { 0x19, 0x0217 },
64 { 0x20, 0x0020 }, 64 { 0x20, 0x0020 },
65 { 0x33, 0x0208 }, 65 { 0x33, 0x0208 },
66 { 0x46, 0x0300 }, 66 { 0x46, 0x0300 },
67 { 0x49, 0x0004 }, 67 { 0x49, 0x4004 },
68 { 0x4f, 0x50e9 }, 68 { 0x4f, 0x50c9 },
69 { 0x50, 0x2000 }, 69 { 0x50, 0x3000 },
70 { 0x63, 0x2902 }, 70 { 0x63, 0x1b02 },
71 { 0x67, 0x1111 }, 71 { 0x67, 0x1111 },
72 { 0x68, 0x1016 }, 72 { 0x68, 0x1016 },
73 { 0x69, 0x273f }, 73 { 0x69, 0x273f },
@@ -1214,7 +1214,7 @@ static int rt298_i2c_probe(struct i2c_client *i2c,
1214 mdelay(10); 1214 mdelay(10);
1215 1215
1216 if (!rt298->pdata.gpio2_en) 1216 if (!rt298->pdata.gpio2_en)
1217 regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000); 1217 regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x40);
1218 else 1218 else
1219 regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0); 1219 regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0);
1220 1220
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 2fbc6ef8cbdb..39ebd7bf4f53 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3808,6 +3808,8 @@ static int wm8962_runtime_resume(struct device *dev)
3808 3808
3809 wm8962_reset(wm8962); 3809 wm8962_reset(wm8962);
3810 3810
3811 regcache_mark_dirty(wm8962->regmap);
3812
3811 /* SYSCLK defaults to on; make sure it is off so we can safely 3813 /* SYSCLK defaults to on; make sure it is off so we can safely
3812 * write to registers if the device is declocked. 3814 * write to registers if the device is declocked.
3813 */ 3815 */
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 100d92b5b77e..05977ae1ff2a 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -207,6 +207,34 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
207EXPORT_SYMBOL_GPL(snd_soc_info_volsw); 207EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
208 208
209/** 209/**
210 * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls
211 * @kcontrol: mixer control
212 * @uinfo: control element information
213 *
214 * Callback to provide information about a single mixer control, or a double
215 * mixer control that spans 2 registers of the SX TLV type. SX TLV controls
216 * have a range that represents both positive and negative values either side
217 * of zero but without a sign bit.
218 *
219 * Returns 0 for success.
220 */
221int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
222 struct snd_ctl_elem_info *uinfo)
223{
224 struct soc_mixer_control *mc =
225 (struct soc_mixer_control *)kcontrol->private_value;
226
227 snd_soc_info_volsw(kcontrol, uinfo);
228 /* Max represents the number of levels in an SX control not the
229 * maximum value, so add the minimum value back on
230 */
231 uinfo->value.integer.max += mc->min;
232
233 return 0;
234}
235EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx);
236
237/**
210 * snd_soc_get_volsw - single mixer get callback 238 * snd_soc_get_volsw - single mixer get callback
211 * @kcontrol: mixer control 239 * @kcontrol: mixer control
212 * @ucontrol: control element information 240 * @ucontrol: control element information
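
The new snd_soc_info_volsw_sx() above reuses snd_soc_info_volsw() and then adds mc->min back onto the reported maximum, because for SX controls max counts levels rather than the top value. A trivial sketch of just that arithmetic, with a stand-in struct and made-up numbers:

#include <stdio.h>

struct sx_mixer_control {		/* stand-in for struct soc_mixer_control */
	int min;			/* lowest raw value of the control */
	int max;			/* number of levels, not the highest value */
};

/* Mirror of the adjustment in snd_soc_info_volsw_sx(): report max + min. */
static int reported_max(const struct sx_mixer_control *mc)
{
	return mc->max + mc->min;
}

int main(void)
{
	struct sx_mixer_control mc = { .min = 4, .max = 16 };	/* made-up values */

	printf("reported max = %d\n", reported_max(&mc));
	return 0;
}
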
diff --git a/tools/build/.gitignore b/tools/build/.gitignore
new file mode 100644
index 000000000000..a776371a3502
--- /dev/null
+++ b/tools/build/.gitignore
@@ -0,0 +1 @@
fixdep
diff --git a/tools/build/Build b/tools/build/Build
new file mode 100644
index 000000000000..63a6c34c0c88
--- /dev/null
+++ b/tools/build/Build
@@ -0,0 +1 @@
fixdep-y := fixdep.o
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 4c8daaccb82a..4d000bc959b4 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -55,14 +55,25 @@ make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
55any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^) 55any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
56 56
57### 57###
58# Copy dependency data into .cmd file
59# - gcc -M dependency info
60# - command line to create object 'cmd_object :='
61dep-cmd = $(if $(wildcard $(fixdep)), \
62 $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \
63 rm -f $(depfile); \
64 mv -f $(dot-target).tmp $(dot-target).cmd, \
65 printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
66 printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
67 cat $(depfile) >> $(dot-target).cmd; \
68 printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
69
70###
58# if_changed_dep - execute command if any prerequisite is newer than 71# if_changed_dep - execute command if any prerequisite is newer than
59# target, or command line has changed and update 72# target, or command line has changed and update
60# dependencies in the cmd file 73# dependencies in the cmd file
61if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \ 74if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
62 @set -e; \ 75 @set -e; \
63 $(echo-cmd) $(cmd_$(1)); \ 76 $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
64 cat $(depfile) > $(dot-target).cmd; \
65 printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
66 77
67# if_changed - execute command if any prerequisite is newer than 78# if_changed - execute command if any prerequisite is newer than
68# target, or command line has changed 79# target, or command line has changed
diff --git a/tools/build/Documentation/Build.txt b/tools/build/Documentation/Build.txt
index aa5e092c4352..a47bffbae159 100644
--- a/tools/build/Documentation/Build.txt
+++ b/tools/build/Documentation/Build.txt
@@ -11,8 +11,9 @@ Unlike the kernel we don't have a single build object 'obj-y' list that where
11we setup source objects, but we support more. This allows one 'Build' file to 11we setup source objects, but we support more. This allows one 'Build' file to
12carry a sources list for multiple build objects. 12carry a sources list for multiple build objects.
13 13
14a) Build framework makefiles 14
15---------------------------- 15Build framework makefiles
16-------------------------
16 17
17The build framework consists of 2 Makefiles: 18The build framework consists of 2 Makefiles:
18 19
@@ -23,7 +24,7 @@ While the 'Build.include' file contains just some generic definitions, the
23'Makefile.build' file is the makefile used from the outside. It's 24'Makefile.build' file is the makefile used from the outside. It's
24interface/usage is following: 25interface/usage is following:
25 26
26 $ make -f tools/build/Makefile srctree=$(KSRC) dir=$(DIR) obj=$(OBJECT) 27 $ make -f tools/build/Makefile.build srctree=$(KSRC) dir=$(DIR) obj=$(OBJECT)
27 28
28where: 29where:
29 30
@@ -38,8 +39,9 @@ called $(OBJECT)-in.o:
38 39
39which includes all compiled sources described in 'Build' makefiles. 40which includes all compiled sources described in 'Build' makefiles.
40 41
41a) Build makefiles 42
42------------------ 43Build makefiles
44---------------
43 45
44The user supplies 'Build' makefiles that contains a objects list, and connects 46The user supplies 'Build' makefiles that contains a objects list, and connects
45the build to nested directories. 47the build to nested directories.
@@ -95,8 +97,31 @@ It's only a matter of 2 single commands to create the final binaries:
95 97
96You can check the 'ex' example in 'tools/build/tests/ex' for more details. 98You can check the 'ex' example in 'tools/build/tests/ex' for more details.
97 99
98b) Rules 100
99-------- 101Makefile.include
102----------------
103
104The tools/build/Makefile.include makefile can be included
105by user makefiles to pick up useful definitions.
106
107It defines the following interface:
108
109 - build macro definition:
110 build := -f $(srctree)/tools/build/Makefile.build dir=. obj
111
112 to make it easier to invoke build like:
113 make $(build)=ex
114
115
116Fixdep
117------
118It is necessary to build the fixdep helper before invoking the build.
119The Makefile.include file adds a fixdep target, which can be
120invoked by the user.
121
122
123Rules
124-----
100 125
101The build framework provides standard compilation rules to handle .S and .c 126The build framework provides standard compilation rules to handle .S and .c
102compilation. 127compilation.
@@ -104,8 +129,9 @@ compilation.
104It's possible to include special rule if needed (like we do for flex or bison 129It's possible to include special rule if needed (like we do for flex or bison
105code generation). 130code generation).
106 131
107c) CFLAGS 132
108--------- 133CFLAGS
134------
109 135
110It's possible to alter the standard object C flags in the following way: 136It's possible to alter the standard object C flags in the following way:
111 137
@@ -115,8 +141,8 @@ It's possible to alter the standard object C flags in the following way:
115This C flags changes has the scope of the Build makefile they are defined in. 141This C flags changes has the scope of the Build makefile they are defined in.
116 142
117 143
118d) Dependencies 144Dependencies
119--------------- 145------------
120 146
121For each built object file 'a.o' the '.a.cmd' is created and holds: 147For each built object file 'a.o' the '.a.cmd' is created and holds:
122 148
@@ -130,8 +156,8 @@ All existing '.cmd' files are included in the Build process to follow properly
130the dependencies and trigger a rebuild when necessary. 156the dependencies and trigger a rebuild when necessary.
131 157
132 158
133e) Single rules 159Single rules
134--------------- 160------------
135 161
136It's possible to build single object file by choice, like: 162It's possible to build single object file by choice, like:
137 163
diff --git a/tools/build/Makefile b/tools/build/Makefile
new file mode 100644
index 000000000000..a93036272d43
--- /dev/null
+++ b/tools/build/Makefile
@@ -0,0 +1,43 @@
1ifeq ($(srctree),)
2srctree := $(patsubst %/,%,$(dir $(shell pwd)))
3srctree := $(patsubst %/,%,$(dir $(srctree)))
4endif
5
6include $(srctree)/tools//scripts/Makefile.include
7
8define allow-override
9 $(if $(or $(findstring environment,$(origin $(1))),\
10 $(findstring command line,$(origin $(1)))),,\
11 $(eval $(1) = $(2)))
12endef
13
14$(call allow-override,CC,$(CROSS_COMPILE)gcc)
15$(call allow-override,LD,$(CROSS_COMPILE)ld)
16
17ifeq ($(V),1)
18 Q =
19else
20 Q = @
21endif
22
23export Q srctree CC LD
24
25MAKEFLAGS := --no-print-directory
26build := -f $(srctree)/tools/build/Makefile.build dir=. obj
27
28all: fixdep
29
30clean:
31 $(call QUIET_CLEAN, fixdep)
32 $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
33 $(Q)rm -f fixdep
34
35$(OUTPUT)fixdep-in.o: FORCE
36 $(Q)$(MAKE) $(build)=fixdep
37
38$(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
39 $(QUIET_LINK)$(CC) $(LDFLAGS) -o $@ $<
40
41FORCE:
42
43.PHONY: FORCE
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 0c5f485521d6..4a96473b180f 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -21,6 +21,13 @@ endif
21 21
22build-dir := $(srctree)/tools/build 22build-dir := $(srctree)/tools/build
23 23
24# Define $(fixdep) for dep-cmd function
25ifeq ($(OUTPUT),)
26 fixdep := $(build-dir)/fixdep
27else
28 fixdep := $(OUTPUT)/fixdep
29endif
30
24# Generic definitions 31# Generic definitions
25include $(build-dir)/Build.include 32include $(build-dir)/Build.include
26 33
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index c8fe6d177119..37ff4c9f92f1 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -53,7 +53,8 @@ FEATURE_TESTS ?= \
53 libdw-dwarf-unwind \ 53 libdw-dwarf-unwind \
54 zlib \ 54 zlib \
55 lzma \ 55 lzma \
56 get_cpuid 56 get_cpuid \
57 bpf
57 58
58FEATURE_DISPLAY ?= \ 59FEATURE_DISPLAY ?= \
59 dwarf \ 60 dwarf \
@@ -71,7 +72,8 @@ FEATURE_DISPLAY ?= \
71 libdw-dwarf-unwind \ 72 libdw-dwarf-unwind \
72 zlib \ 73 zlib \
73 lzma \ 74 lzma \
74 get_cpuid 75 get_cpuid \
76 bpf
75 77
76# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. 78# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
77# If in the future we need per-feature checks/flags for features not 79# If in the future we need per-feature checks/flags for features not
@@ -121,8 +123,9 @@ define feature_print_text_code
121 MSG = $(shell printf '...%30s: %s' $(1) $(2)) 123 MSG = $(shell printf '...%30s: %s' $(1) $(2))
122endef 124endef
123 125
126FEATURE_DUMP_FILENAME = $(OUTPUT)FEATURE-DUMP$(FEATURE_USER)
124FEATURE_DUMP := $(foreach feat,$(FEATURE_DISPLAY),feature-$(feat)($(feature-$(feat)))) 127FEATURE_DUMP := $(foreach feat,$(FEATURE_DISPLAY),feature-$(feat)($(feature-$(feat))))
125FEATURE_DUMP_FILE := $(shell touch $(OUTPUT)FEATURE-DUMP; cat $(OUTPUT)FEATURE-DUMP) 128FEATURE_DUMP_FILE := $(shell touch $(FEATURE_DUMP_FILENAME); cat $(FEATURE_DUMP_FILENAME))
126 129
127ifeq ($(dwarf-post-unwind),1) 130ifeq ($(dwarf-post-unwind),1)
128 FEATURE_DUMP += dwarf-post-unwind($(dwarf-post-unwind-text)) 131 FEATURE_DUMP += dwarf-post-unwind($(dwarf-post-unwind-text))
@@ -131,16 +134,16 @@ endif
131# The $(feature_display) controls the default detection message 134# The $(feature_display) controls the default detection message
132# output. It's set if: 135# output. It's set if:
133# - detected features differ from stored features from 136# - detected features differ from stored features from
134# last build (in FEATURE-DUMP file) 137# last build (in $(FEATURE_DUMP_FILENAME) file)
135# - one of the $(FEATURE_DISPLAY) is not detected 138# - one of the $(FEATURE_DISPLAY) is not detected
136# - VF is enabled 139# - VF is enabled
137 140
138ifneq ("$(FEATURE_DUMP)","$(FEATURE_DUMP_FILE)") 141ifneq ("$(FEATURE_DUMP)","$(FEATURE_DUMP_FILE)")
139 $(shell echo "$(FEATURE_DUMP)" > $(OUTPUT)FEATURE-DUMP) 142 $(shell echo "$(FEATURE_DUMP)" > $(FEATURE_DUMP_FILENAME))
140 feature_display := 1 143 feature_display := 1
141endif 144endif
142 145
143feature_display_check = $(eval $(feature_check_code)) 146feature_display_check = $(eval $(feature_check_display_code))
144define feature_display_check_code 147define feature_display_check_code
145 ifneq ($(feature-$(1)), 1) 148 ifneq ($(feature-$(1)), 1)
146 feature_display := 1 149 feature_display := 1
diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include
new file mode 100644
index 000000000000..4e09ad617a60
--- /dev/null
+++ b/tools/build/Makefile.include
@@ -0,0 +1,10 @@
1build := -f $(srctree)/tools/build/Makefile.build dir=. obj
2
3ifdef CROSS_COMPILE
4fixdep:
5else
6fixdep:
7 $(Q)$(MAKE) -C $(srctree)/tools/build fixdep
8endif
9
10.PHONY: fixdep
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index e43a2971bf56..cea04ce9f35c 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -132,10 +132,10 @@ test-libbfd.bin:
132 $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl 132 $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
133 133
134test-liberty.bin: 134test-liberty.bin:
135 $(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty 135 $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
136 136
137test-liberty-z.bin: 137test-liberty-z.bin:
138 $(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz 138 $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
139 139
140test-cplus-demangle.bin: 140test-cplus-demangle.bin:
141 $(BUILD) -liberty 141 $(BUILD) -liberty
diff --git a/tools/build/fixdep.c b/tools/build/fixdep.c
new file mode 100644
index 000000000000..1521d36cef0d
--- /dev/null
+++ b/tools/build/fixdep.c
@@ -0,0 +1,168 @@
1/*
2 * "Optimize" a list of dependencies as spit out by gcc -MD
3 * for the build framework.
4 *
5 * Original author:
6 * Copyright 2002 by Kai Germaschewski <kai.germaschewski@gmx.de>
7 *
8 * This code has been borrowed from kbuild's fixdep (scripts/basic/fixdep.c);
9 * please check it for a detailed explanation. This fixdep borrows only the
10 * base transformation of dependencies, without the CONFIG mangling.
11 */
12
13#include <sys/types.h>
14#include <sys/stat.h>
15#include <sys/mman.h>
16#include <unistd.h>
17#include <fcntl.h>
18#include <string.h>
19#include <stdlib.h>
20#include <stdio.h>
21#include <limits.h>
22
23char *target;
24char *depfile;
25char *cmdline;
26
27static void usage(void)
28{
29 fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
30 exit(1);
31}
32
33/*
34 * Print out the commandline prefixed with cmd_<target filename> :=
35 */
36static void print_cmdline(void)
37{
38 printf("cmd_%s := %s\n\n", target, cmdline);
39}
40
41/*
42 * Important: The below generated source_foo.o and deps_foo.o variable
43 * assignments are parsed not only by make, but also by the rather simple
44 * parser in scripts/mod/sumversion.c.
45 */
46static void parse_dep_file(void *map, size_t len)
47{
48 char *m = map;
49 char *end = m + len;
50 char *p;
51 char s[PATH_MAX];
52 int is_target;
53 int saw_any_target = 0;
54 int is_first_dep = 0;
55
56 while (m < end) {
57 /* Skip any "white space" */
58 while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
59 m++;
60 /* Find next "white space" */
61 p = m;
62 while (p < end && *p != ' ' && *p != '\\' && *p != '\n')
63 p++;
64 /* Is the token we found a target name? */
65 is_target = (*(p-1) == ':');
66 /* Don't write any target names into the dependency file */
67 if (is_target) {
68 /* The /next/ file is the first dependency */
69 is_first_dep = 1;
70 } else {
71 /* Save this token/filename */
72 memcpy(s, m, p-m);
73 s[p - m] = 0;
74
75 /*
76 * Do not list the source file as dependency,
77 * so that kbuild is not confused if a .c file
78 * is rewritten into .S or vice versa. Storing
79 * it in source_* is needed for modpost to
80 * compute srcversions.
81 */
82 if (is_first_dep) {
83 /*
84 * If processing the concatenation of
85 * multiple dependency files, only
86 * process the first target name, which
87 * will be the original source name,
88 * and ignore any other target names,
89 * which will be intermediate temporary
90 * files.
91 */
92 if (!saw_any_target) {
93 saw_any_target = 1;
94 printf("source_%s := %s\n\n",
95 target, s);
96 printf("deps_%s := \\\n",
97 target);
98 }
99 is_first_dep = 0;
100 } else
101 printf(" %s \\\n", s);
102 }
103 /*
104 * Start searching for next token immediately after the first
105 * "whitespace" character that follows this token.
106 */
107 m = p + 1;
108 }
109
110 if (!saw_any_target) {
111 fprintf(stderr, "fixdep: parse error; no targets found\n");
112 exit(1);
113 }
114
115 printf("\n%s: $(deps_%s)\n\n", target, target);
116 printf("$(deps_%s):\n", target);
117}
118
119static void print_deps(void)
120{
121 struct stat st;
122 int fd;
123 void *map;
124
125 fd = open(depfile, O_RDONLY);
126 if (fd < 0) {
127 fprintf(stderr, "fixdep: error opening depfile: ");
128 perror(depfile);
129 exit(2);
130 }
131 if (fstat(fd, &st) < 0) {
132 fprintf(stderr, "fixdep: error fstat'ing depfile: ");
133 perror(depfile);
134 exit(2);
135 }
136 if (st.st_size == 0) {
137 fprintf(stderr, "fixdep: %s is empty\n", depfile);
138 close(fd);
139 return;
140 }
141 map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
142 if ((long) map == -1) {
143 perror("fixdep: mmap");
144 close(fd);
145 return;
146 }
147
148 parse_dep_file(map, st.st_size);
149
150 munmap(map, st.st_size);
151
152 close(fd);
153}
154
155int main(int argc, char **argv)
156{
157 if (argc != 4)
158 usage();
159
160 depfile = argv[1];
161 target = argv[2];
162 cmdline = argv[3];
163
164 print_cmdline();
165 print_deps();
166
167 return 0;
168}
diff --git a/tools/build/tests/ex/Build b/tools/build/tests/ex/Build
index 429c7d452101..4d502f9b1a50 100644
--- a/tools/build/tests/ex/Build
+++ b/tools/build/tests/ex/Build
@@ -4,6 +4,7 @@ ex-y += b.o
4ex-y += b.o 4ex-y += b.o
5ex-y += empty/ 5ex-y += empty/
6ex-y += empty2/ 6ex-y += empty2/
7ex-y += inc.o
7 8
8libex-y += c.o 9libex-y += c.o
9libex-y += d.o 10libex-y += d.o
diff --git a/tools/build/tests/ex/Makefile b/tools/build/tests/ex/Makefile
index 52d2476073a3..c50d5782ad5a 100644
--- a/tools/build/tests/ex/Makefile
+++ b/tools/build/tests/ex/Makefile
@@ -1,19 +1,22 @@
1export srctree := ../../../.. 1export srctree := $(abspath ../../../..)
2export CC := gcc 2export CC := gcc
3export LD := ld 3export LD := ld
4export AR := ar 4export AR := ar
5 5
6build := -f $(srctree)/tools/build/Makefile.build dir=. obj 6ex:
7
8include $(srctree)/tools/build/Makefile.include
9
7ex: ex-in.o libex-in.o 10ex: ex-in.o libex-in.o
8 gcc -o $@ $^ 11 gcc -o $@ $^
9 12
10ex.%: FORCE 13ex.%: fixdep FORCE
11 make -f $(srctree)/tools/build/Makefile.build dir=. $@ 14 make -f $(srctree)/tools/build/Makefile.build dir=. $@
12 15
13ex-in.o: FORCE 16ex-in.o: fixdep FORCE
14 make $(build)=ex 17 make $(build)=ex
15 18
16libex-in.o: FORCE 19libex-in.o: fixdep FORCE
17 make $(build)=libex 20 make $(build)=libex
18 21
19clean: 22clean:
diff --git a/tools/build/tests/ex/ex.c b/tools/build/tests/ex/ex.c
index dc42eb2e1a67..57de6074d252 100644
--- a/tools/build/tests/ex/ex.c
+++ b/tools/build/tests/ex/ex.c
@@ -5,6 +5,7 @@ int c(void);
5int d(void); 5int d(void);
6int e(void); 6int e(void);
7int f(void); 7int f(void);
8int inc(void);
8 9
9int main(void) 10int main(void)
10{ 11{
@@ -14,6 +15,7 @@ int main(void)
14 d(); 15 d();
15 e(); 16 e();
16 f(); 17 f();
18 inc();
17 19
18 return 0; 20 return 0;
19} 21}
diff --git a/tools/build/tests/ex/inc.c b/tools/build/tests/ex/inc.c
new file mode 100644
index 000000000000..c20f1e9033a3
--- /dev/null
+++ b/tools/build/tests/ex/inc.c
@@ -0,0 +1,8 @@
1#ifdef INCLUDE
2#include "krava.h"
3#endif
4
5int inc(void)
6{
7 return 0;
8}
diff --git a/tools/build/tests/run.sh b/tools/build/tests/run.sh
index 5494f8ea7567..44d2a0fade67 100755
--- a/tools/build/tests/run.sh
+++ b/tools/build/tests/run.sh
@@ -34,9 +34,36 @@ function test_ex_suffix {
34 make -C ex V=1 clean > /dev/null 2>&1 34 make -C ex V=1 clean > /dev/null 2>&1
35 rm -f ex.out 35 rm -f ex.out
36} 36}
37
38function test_ex_include {
39 make -C ex V=1 clean > ex.out 2>&1
40
41 # build with krava.h include
42 touch ex/krava.h
43 make -C ex V=1 CFLAGS=-DINCLUDE >> ex.out 2>&1
44
45 if [ ! -x ./ex/ex ]; then
46 echo FAILED
47 exit -1
48 fi
49
50 # build without the include
51 rm -f ex/krava.h ex/ex
52 make -C ex V=1 >> ex.out 2>&1
53
54 if [ ! -x ./ex/ex ]; then
55 echo FAILED
56 exit -1
57 fi
58
59 make -C ex V=1 clean > /dev/null 2>&1
60 rm -f ex.out
61}
62
37echo -n Testing.. 63echo -n Testing..
38 64
39test_ex 65test_ex
40test_ex_suffix 66test_ex_suffix
67test_ex_include
41 68
42echo OK 69echo OK
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 9098083869c8..fa7208a32d76 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -43,13 +43,29 @@
43 43
44#include <linux/types.h> 44#include <linux/types.h>
45 45
46/*
47 * Following functions are taken from kernel sources and
48 * break aliasing rules in their original form.
49 *
50 * While kernel is compiled with -fno-strict-aliasing,
51 * perf uses -Wstrict-aliasing=3 which makes build fail
52 * under gcc 4.4.
53 *
54 * Using extra __may_alias__ type to allow aliasing
55 * in this case.
56 */
57typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
58typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
59typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
60typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
61
46static __always_inline void __read_once_size(const volatile void *p, void *res, int size) 62static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
47{ 63{
48 switch (size) { 64 switch (size) {
49 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; 65 case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
50 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; 66 case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
51 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; 67 case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
52 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; 68 case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
53 default: 69 default:
54 barrier(); 70 barrier();
55 __builtin_memcpy((void *)res, (const void *)p, size); 71 __builtin_memcpy((void *)res, (const void *)p, size);
@@ -60,10 +76,10 @@ static __always_inline void __read_once_size(const volatile void *p, void *res,
60static __always_inline void __write_once_size(volatile void *p, void *res, int size) 76static __always_inline void __write_once_size(volatile void *p, void *res, int size)
61{ 77{
62 switch (size) { 78 switch (size) {
63 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; 79 case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
64 case 2: *(volatile __u16 *)p = *(__u16 *)res; break; 80 case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
65 case 4: *(volatile __u32 *)p = *(__u32 *)res; break; 81 case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
66 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 82 case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
67 default: 83 default:
68 barrier(); 84 barrier();
69 __builtin_memcpy((void *)p, (const void *)res, size); 85 __builtin_memcpy((void *)p, (const void *)res, size);
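
The __may_alias__ typedefs above keep the byte-wise copies in __read_once_size()/__write_once_size() legal under strict-aliasing rules, which matters because perf builds with -Wstrict-aliasing=3 rather than the kernel's -fno-strict-aliasing. A minimal standalone sketch of the same idea, using a <stdint.h> type instead of the kernel's __u32 (illustration only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Aliasing-safe 32-bit type, as in the patch above. */
	typedef uint32_t __attribute__((__may_alias__)) u32_alias_t;

	/* Read *p exactly once, through a volatile, aliasing-safe pointer. */
	static inline uint32_t read_once_u32(const void *p)
	{
		return *(const volatile u32_alias_t *)p;
	}

	int main(void)
	{
		uint32_t word = 0x12345678;

		printf("0x%08x\n", (unsigned int)read_once_u32(&word));
		return 0;
	}
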
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
new file mode 100644
index 000000000000..bdc3dd8131d4
--- /dev/null
+++ b/tools/include/linux/err.h
@@ -0,0 +1,49 @@
1#ifndef __TOOLS_LINUX_ERR_H
2#define __TOOLS_LINUX_ERR_H
3
4#include <linux/compiler.h>
5#include <linux/types.h>
6
7#include <asm/errno.h>
8
9/*
10 * Original kernel header comment:
11 *
12 * Kernel pointers have redundant information, so we can use a
13 * scheme where we can return either an error code or a normal
14 * pointer with the same return value.
15 *
16 * This should be a per-architecture thing, to allow different
17 * error and pointer decisions.
18 *
19 * Userspace note:
20 * The same principle works for userspace, because 'error' pointers
21 * fall down to the unused hole far from user space, as described
22 * in Documentation/x86/x86_64/mm.txt for x86_64 arch:
23 *
24 * 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm hole caused by [48:63] sign extension
25 * ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
26 *
27 * It should be the same case for other architectures, because
28 * this code is used in generic kernel code.
29 */
30#define MAX_ERRNO 4095
31
32#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
33
34static inline void * __must_check ERR_PTR(long error_)
35{
36 return (void *) error_;
37}
38
39static inline long __must_check PTR_ERR(__force const void *ptr)
40{
41 return (long) ptr;
42}
43
44static inline bool __must_check IS_ERR(__force const void *ptr)
45{
46 return IS_ERR_VALUE((unsigned long)ptr);
47}
48
49#endif /* __TOOLS_LINUX_ERR_H */
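
This header mirrors the kernel's pointer-or-errno return convention for userspace tools. A hedged usage sketch follows; open_map() is an invented example function, and the include assumes the tools include path (-I tools/include) so that this err.h is picked up:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#include <linux/err.h>	/* the header added above */

	/* Invented example: return a buffer, or an errno encoded as a pointer. */
	static void *open_map(size_t size)
	{
		void *buf = malloc(size);

		return buf ? buf : ERR_PTR(-ENOMEM);
	}

	int main(void)
	{
		void *map = open_map(1 << 12);

		if (IS_ERR(map)) {
			fprintf(stderr, "open_map: %s\n",
				strerror((int)-PTR_ERR(map)));
			return 1;
		}

		free(map);
		return 0;
	}
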
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
new file mode 100644
index 000000000000..3276625595b2
--- /dev/null
+++ b/tools/include/linux/filter.h
@@ -0,0 +1,231 @@
1/*
2 * Linux Socket Filter Data Structures
3 */
4#ifndef __TOOLS_LINUX_FILTER_H
5#define __TOOLS_LINUX_FILTER_H
6
7#include <linux/bpf.h>
8
9/* ArgX, context and stack frame pointer register positions. Note,
10 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
11 * calls in BPF_CALL instruction.
12 */
13#define BPF_REG_ARG1 BPF_REG_1
14#define BPF_REG_ARG2 BPF_REG_2
15#define BPF_REG_ARG3 BPF_REG_3
16#define BPF_REG_ARG4 BPF_REG_4
17#define BPF_REG_ARG5 BPF_REG_5
18#define BPF_REG_CTX BPF_REG_6
19#define BPF_REG_FP BPF_REG_10
20
21/* Additional register mappings for converted user programs. */
22#define BPF_REG_A BPF_REG_0
23#define BPF_REG_X BPF_REG_7
24#define BPF_REG_TMP BPF_REG_8
25
26/* BPF program can access up to 512 bytes of stack space. */
27#define MAX_BPF_STACK 512
28
29/* Helper macros for filter block array initializers. */
30
31/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
32
33#define BPF_ALU64_REG(OP, DST, SRC) \
34 ((struct bpf_insn) { \
35 .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
36 .dst_reg = DST, \
37 .src_reg = SRC, \
38 .off = 0, \
39 .imm = 0 })
40
41#define BPF_ALU32_REG(OP, DST, SRC) \
42 ((struct bpf_insn) { \
43 .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
44 .dst_reg = DST, \
45 .src_reg = SRC, \
46 .off = 0, \
47 .imm = 0 })
48
49/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
50
51#define BPF_ALU64_IMM(OP, DST, IMM) \
52 ((struct bpf_insn) { \
53 .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
54 .dst_reg = DST, \
55 .src_reg = 0, \
56 .off = 0, \
57 .imm = IMM })
58
59#define BPF_ALU32_IMM(OP, DST, IMM) \
60 ((struct bpf_insn) { \
61 .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
62 .dst_reg = DST, \
63 .src_reg = 0, \
64 .off = 0, \
65 .imm = IMM })
66
67/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
68
69#define BPF_ENDIAN(TYPE, DST, LEN) \
70 ((struct bpf_insn) { \
71 .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
72 .dst_reg = DST, \
73 .src_reg = 0, \
74 .off = 0, \
75 .imm = LEN })
76
77/* Short form of mov, dst_reg = src_reg */
78
79#define BPF_MOV64_REG(DST, SRC) \
80 ((struct bpf_insn) { \
81 .code = BPF_ALU64 | BPF_MOV | BPF_X, \
82 .dst_reg = DST, \
83 .src_reg = SRC, \
84 .off = 0, \
85 .imm = 0 })
86
87#define BPF_MOV32_REG(DST, SRC) \
88 ((struct bpf_insn) { \
89 .code = BPF_ALU | BPF_MOV | BPF_X, \
90 .dst_reg = DST, \
91 .src_reg = SRC, \
92 .off = 0, \
93 .imm = 0 })
94
95/* Short form of mov, dst_reg = imm32 */
96
97#define BPF_MOV64_IMM(DST, IMM) \
98 ((struct bpf_insn) { \
99 .code = BPF_ALU64 | BPF_MOV | BPF_K, \
100 .dst_reg = DST, \
101 .src_reg = 0, \
102 .off = 0, \
103 .imm = IMM })
104
105#define BPF_MOV32_IMM(DST, IMM) \
106 ((struct bpf_insn) { \
107 .code = BPF_ALU | BPF_MOV | BPF_K, \
108 .dst_reg = DST, \
109 .src_reg = 0, \
110 .off = 0, \
111 .imm = IMM })
112
113/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
114
115#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
116 ((struct bpf_insn) { \
117 .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
118 .dst_reg = DST, \
119 .src_reg = SRC, \
120 .off = 0, \
121 .imm = IMM })
122
123#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
124 ((struct bpf_insn) { \
125 .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
126 .dst_reg = DST, \
127 .src_reg = SRC, \
128 .off = 0, \
129 .imm = IMM })
130
131/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
132
133#define BPF_LD_ABS(SIZE, IMM) \
134 ((struct bpf_insn) { \
135 .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
136 .dst_reg = 0, \
137 .src_reg = 0, \
138 .off = 0, \
139 .imm = IMM })
140
141/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
142
143#define BPF_LD_IND(SIZE, SRC, IMM) \
144 ((struct bpf_insn) { \
145 .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
146 .dst_reg = 0, \
147 .src_reg = SRC, \
148 .off = 0, \
149 .imm = IMM })
150
151/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
152
153#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
154 ((struct bpf_insn) { \
155 .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
156 .dst_reg = DST, \
157 .src_reg = SRC, \
158 .off = OFF, \
159 .imm = 0 })
160
161/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
162
163#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
164 ((struct bpf_insn) { \
165 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
166 .dst_reg = DST, \
167 .src_reg = SRC, \
168 .off = OFF, \
169 .imm = 0 })
170
171/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
172
173#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
174 ((struct bpf_insn) { \
175 .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
176 .dst_reg = DST, \
177 .src_reg = 0, \
178 .off = OFF, \
179 .imm = IMM })
180
181/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
182
183#define BPF_JMP_REG(OP, DST, SRC, OFF) \
184 ((struct bpf_insn) { \
185 .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
186 .dst_reg = DST, \
187 .src_reg = SRC, \
188 .off = OFF, \
189 .imm = 0 })
190
191/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
192
193#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
194 ((struct bpf_insn) { \
195 .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
196 .dst_reg = DST, \
197 .src_reg = 0, \
198 .off = OFF, \
199 .imm = IMM })
200
201/* Function call */
202
203#define BPF_EMIT_CALL(FUNC) \
204 ((struct bpf_insn) { \
205 .code = BPF_JMP | BPF_CALL, \
206 .dst_reg = 0, \
207 .src_reg = 0, \
208 .off = 0, \
209 .imm = ((FUNC) - BPF_FUNC_unspec) })
210
211/* Raw code statement block */
212
213#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
214 ((struct bpf_insn) { \
215 .code = CODE, \
216 .dst_reg = DST, \
217 .src_reg = SRC, \
218 .off = OFF, \
219 .imm = IMM })
220
221/* Program exit */
222
223#define BPF_EXIT_INSN() \
224 ((struct bpf_insn) { \
225 .code = BPF_JMP | BPF_EXIT, \
226 .dst_reg = 0, \
227 .src_reg = 0, \
228 .off = 0, \
229 .imm = 0 })
230
231#endif /* __TOOLS_LINUX_FILTER_H */
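
These macros build struct bpf_insn values directly, much like the kernel's own include/linux/filter.h. As an illustrative sketch (not part of the patch), a trivial eBPF program that simply returns 1 could be assembled as below; actually loading it would additionally need a bpf(2) syscall wrapper, which is omitted. The includes assume the same -I paths the libbpf Makefile sets up (tools/include plus the kernel uapi headers):

	#include <linux/bpf.h>		/* struct bpf_insn, BPF_REG_*, opcodes */
	#include <linux/filter.h>	/* the macros added above */
	#include <stdio.h>

	int main(void)
	{
		/* r0 = 1; exit  --  the program's return value is 1. */
		struct bpf_insn prog[] = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		};

		printf("%zu instructions, %zu bytes\n",
		       sizeof(prog) / sizeof(prog[0]), sizeof(prog));
		return 0;
	}
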
diff --git a/tools/lib/api/Build b/tools/lib/api/Build
index 3653965cf481..e8b8a23b9bf4 100644
--- a/tools/lib/api/Build
+++ b/tools/lib/api/Build
@@ -1,2 +1,3 @@
1libapi-y += fd/ 1libapi-y += fd/
2libapi-y += fs/ 2libapi-y += fs/
3libapi-y += cpu.o
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index fe1b02c2c95b..d85904dc9b38 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -21,12 +21,14 @@ CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
21 21
22RM = rm -f 22RM = rm -f
23 23
24build := -f $(srctree)/tools/build/Makefile.build dir=. obj
25API_IN := $(OUTPUT)libapi-in.o 24API_IN := $(OUTPUT)libapi-in.o
26 25
26all:
27
27export srctree OUTPUT CC LD CFLAGS V 28export srctree OUTPUT CC LD CFLAGS V
29include $(srctree)/tools/build/Makefile.include
28 30
29all: $(LIBFILE) 31all: fixdep $(LIBFILE)
30 32
31$(API_IN): FORCE 33$(API_IN): FORCE
32 @$(MAKE) $(build)=libapi 34 @$(MAKE) $(build)=libapi
diff --git a/tools/lib/api/cpu.c b/tools/lib/api/cpu.c
new file mode 100644
index 000000000000..8c6489356e3a
--- /dev/null
+++ b/tools/lib/api/cpu.c
@@ -0,0 +1,18 @@
1#include <stdio.h>
2
3#include "cpu.h"
4#include "fs/fs.h"
5
6int cpu__get_max_freq(unsigned long long *freq)
7{
8 char entry[PATH_MAX];
9 int cpu;
10
11 if (sysfs__read_int("devices/system/cpu/online", &cpu) < 0)
12 return -1;
13
14 snprintf(entry, sizeof(entry),
15 "devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpu);
16
17 return sysfs__read_ull(entry, freq);
18}
diff --git a/tools/lib/api/cpu.h b/tools/lib/api/cpu.h
new file mode 100644
index 000000000000..81e9d3955961
--- /dev/null
+++ b/tools/lib/api/cpu.h
@@ -0,0 +1,6 @@
1#ifndef __API_CPU__
2#define __API_CPU__
3
4int cpu__get_max_freq(unsigned long long *freq);
5
6#endif /* __API_CPU__ */
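
cpu__get_max_freq() builds on the new sysfs read helpers: it picks the first online CPU and reads its cpuinfo_max_freq (reported by sysfs in kHz). A usage sketch, assuming -I tools/lib on the include path and linking against libapi:

	#include <stdio.h>

	#include <api/cpu.h>	/* cpu__get_max_freq() */

	int main(void)
	{
		unsigned long long freq;

		if (cpu__get_max_freq(&freq)) {
			fprintf(stderr, "failed to read cpuinfo_max_freq\n");
			return 1;
		}

		printf("max CPU frequency: %llu kHz\n", freq);
		return 0;
	}
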
diff --git a/tools/lib/api/fs/Build b/tools/lib/api/fs/Build
index 6de5a4f0b501..f4ed9629ae85 100644
--- a/tools/lib/api/fs/Build
+++ b/tools/lib/api/fs/Build
@@ -1,4 +1,2 @@
1libapi-y += fs.o 1libapi-y += fs.o
2libapi-y += debugfs.o 2libapi-y += tracing_path.o
3libapi-y += findfs.o
4libapi-y += tracefs.o
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
deleted file mode 100644
index eb7cf4d18f8a..000000000000
--- a/tools/lib/api/fs/debugfs.c
+++ /dev/null
@@ -1,129 +0,0 @@
1#define _GNU_SOURCE
2#include <errno.h>
3#include <stdio.h>
4#include <stdlib.h>
5#include <string.h>
6#include <unistd.h>
7#include <stdbool.h>
8#include <sys/vfs.h>
9#include <sys/types.h>
10#include <sys/stat.h>
11#include <sys/mount.h>
12#include <linux/kernel.h>
13
14#include "debugfs.h"
15#include "tracefs.h"
16
17#ifndef DEBUGFS_DEFAULT_PATH
18#define DEBUGFS_DEFAULT_PATH "/sys/kernel/debug"
19#endif
20
21char debugfs_mountpoint[PATH_MAX + 1] = DEBUGFS_DEFAULT_PATH;
22
23static const char * const debugfs_known_mountpoints[] = {
24 DEBUGFS_DEFAULT_PATH,
25 "/debug",
26 0,
27};
28
29static bool debugfs_found;
30
31bool debugfs_configured(void)
32{
33 return debugfs_find_mountpoint() != NULL;
34}
35
36/* find the path to the mounted debugfs */
37const char *debugfs_find_mountpoint(void)
38{
39 const char *ret;
40
41 if (debugfs_found)
42 return (const char *)debugfs_mountpoint;
43
44 ret = find_mountpoint("debugfs", (long) DEBUGFS_MAGIC,
45 debugfs_mountpoint, PATH_MAX + 1,
46 debugfs_known_mountpoints);
47 if (ret)
48 debugfs_found = true;
49
50 return ret;
51}
52
53/* mount the debugfs somewhere if it's not mounted */
54char *debugfs_mount(const char *mountpoint)
55{
56 /* see if it's already mounted */
57 if (debugfs_find_mountpoint())
58 goto out;
59
60 /* if not mounted and no argument */
61 if (mountpoint == NULL) {
62 /* see if environment variable set */
63 mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
64 /* if no environment variable, use default */
65 if (mountpoint == NULL)
66 mountpoint = DEBUGFS_DEFAULT_PATH;
67 }
68
69 if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
70 return NULL;
71
72 /* save the mountpoint */
73 debugfs_found = true;
74 strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
75out:
76 return debugfs_mountpoint;
77}
78
79int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename)
80{
81 char sbuf[128];
82
83 switch (err) {
84 case ENOENT:
85 if (debugfs_found) {
86 snprintf(buf, size,
87 "Error:\tFile %s/%s not found.\n"
88 "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
89 debugfs_mountpoint, filename);
90 break;
91 }
92 snprintf(buf, size, "%s",
93 "Error:\tUnable to find debugfs\n"
94 "Hint:\tWas your kernel compiled with debugfs support?\n"
95 "Hint:\tIs the debugfs filesystem mounted?\n"
96 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
97 break;
98 case EACCES: {
99 const char *mountpoint = debugfs_mountpoint;
100
101 if (!access(debugfs_mountpoint, R_OK) && strncmp(filename, "tracing/", 8) == 0) {
102 const char *tracefs_mntpoint = tracefs_find_mountpoint();
103
104 if (tracefs_mntpoint)
105 mountpoint = tracefs_mntpoint;
106 }
107
108 snprintf(buf, size,
109 "Error:\tNo permissions to read %s/%s\n"
110 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
111 debugfs_mountpoint, filename, mountpoint);
112 }
113 break;
114 default:
115 snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
116 break;
117 }
118
119 return 0;
120}
121
122int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
123{
124 char path[PATH_MAX];
125
126 snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*");
127
128 return debugfs__strerror_open(err, buf, size, path);
129}
diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h
deleted file mode 100644
index 455023698d2b..000000000000
--- a/tools/lib/api/fs/debugfs.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef __API_DEBUGFS_H__
2#define __API_DEBUGFS_H__
3
4#include "findfs.h"
5
6#ifndef DEBUGFS_MAGIC
7#define DEBUGFS_MAGIC 0x64626720
8#endif
9
10#ifndef PERF_DEBUGFS_ENVIRONMENT
11#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
12#endif
13
14bool debugfs_configured(void);
15const char *debugfs_find_mountpoint(void);
16char *debugfs_mount(const char *mountpoint);
17
18extern char debugfs_mountpoint[];
19
20int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename);
21int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
22
23#endif /* __API_DEBUGFS_H__ */
diff --git a/tools/lib/api/fs/findfs.c b/tools/lib/api/fs/findfs.c
deleted file mode 100644
index 49946cb6d7af..000000000000
--- a/tools/lib/api/fs/findfs.c
+++ /dev/null
@@ -1,63 +0,0 @@
1#include <errno.h>
2#include <stdio.h>
3#include <stdlib.h>
4#include <string.h>
5#include <stdbool.h>
6#include <sys/vfs.h>
7
8#include "findfs.h"
9
10/* verify that a mountpoint is actually the type we want */
11
12int valid_mountpoint(const char *mount, long magic)
13{
14 struct statfs st_fs;
15
16 if (statfs(mount, &st_fs) < 0)
17 return -ENOENT;
18 else if ((long)st_fs.f_type != magic)
19 return -ENOENT;
20
21 return 0;
22}
23
24/* find the path to a mounted file system */
25const char *find_mountpoint(const char *fstype, long magic,
26 char *mountpoint, int len,
27 const char * const *known_mountpoints)
28{
29 const char * const *ptr;
30 char format[128];
31 char type[100];
32 FILE *fp;
33
34 if (known_mountpoints) {
35 ptr = known_mountpoints;
36 while (*ptr) {
37 if (valid_mountpoint(*ptr, magic) == 0) {
38 strncpy(mountpoint, *ptr, len - 1);
39 mountpoint[len-1] = 0;
40 return mountpoint;
41 }
42 ptr++;
43 }
44 }
45
46 /* give up and parse /proc/mounts */
47 fp = fopen("/proc/mounts", "r");
48 if (fp == NULL)
49 return NULL;
50
51 snprintf(format, 128, "%%*s %%%ds %%99s %%*s %%*d %%*d\n", len);
52
53 while (fscanf(fp, format, mountpoint, type) == 2) {
54 if (strcmp(type, fstype) == 0)
55 break;
56 }
57 fclose(fp);
58
59 if (strcmp(type, fstype) != 0)
60 return NULL;
61
62 return mountpoint;
63}
diff --git a/tools/lib/api/fs/findfs.h b/tools/lib/api/fs/findfs.h
deleted file mode 100644
index b6f5d05acc42..000000000000
--- a/tools/lib/api/fs/findfs.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef __API_FINDFS_H__
2#define __API_FINDFS_H__
3
4#include <stdbool.h>
5
6#define _STR(x) #x
7#define STR(x) _STR(x)
8
9/*
10 * On most systems <limits.h> would have given us this, but not on some systems
11 * (e.g. GNU/Hurd).
12 */
13#ifndef PATH_MAX
14#define PATH_MAX 4096
15#endif
16
17const char *find_mountpoint(const char *fstype, long magic,
18 char *mountpoint, int len,
19 const char * const *known_mountpoints);
20
21int valid_mountpoint(const char *mount, long magic);
22
23#endif /* __API_FINDFS_H__ */
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 128ef6332a6b..459599d1b6c4 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -1,7 +1,6 @@
1/* TODO merge/factor in debugfs.c here */
2
3#include <ctype.h> 1#include <ctype.h>
4#include <errno.h> 2#include <errno.h>
3#include <limits.h>
5#include <stdbool.h> 4#include <stdbool.h>
6#include <stdio.h> 5#include <stdio.h>
7#include <stdlib.h> 6#include <stdlib.h>
@@ -11,10 +10,29 @@
11#include <sys/stat.h> 10#include <sys/stat.h>
12#include <fcntl.h> 11#include <fcntl.h>
13#include <unistd.h> 12#include <unistd.h>
13#include <sys/mount.h>
14 14
15#include "debugfs.h"
16#include "fs.h" 15#include "fs.h"
17 16
17#define _STR(x) #x
18#define STR(x) _STR(x)
19
20#ifndef SYSFS_MAGIC
21#define SYSFS_MAGIC 0x62656572
22#endif
23
24#ifndef PROC_SUPER_MAGIC
25#define PROC_SUPER_MAGIC 0x9fa0
26#endif
27
28#ifndef DEBUGFS_MAGIC
29#define DEBUGFS_MAGIC 0x64626720
30#endif
31
32#ifndef TRACEFS_MAGIC
33#define TRACEFS_MAGIC 0x74726163
34#endif
35
18static const char * const sysfs__fs_known_mountpoints[] = { 36static const char * const sysfs__fs_known_mountpoints[] = {
19 "/sys", 37 "/sys",
20 0, 38 0,
@@ -25,19 +43,48 @@ static const char * const procfs__known_mountpoints[] = {
25 0, 43 0,
26}; 44};
27 45
46#ifndef DEBUGFS_DEFAULT_PATH
47#define DEBUGFS_DEFAULT_PATH "/sys/kernel/debug"
48#endif
49
50static const char * const debugfs__known_mountpoints[] = {
51 DEBUGFS_DEFAULT_PATH,
52 "/debug",
53 0,
54};
55
56
57#ifndef TRACEFS_DEFAULT_PATH
58#define TRACEFS_DEFAULT_PATH "/sys/kernel/tracing"
59#endif
60
61static const char * const tracefs__known_mountpoints[] = {
62 TRACEFS_DEFAULT_PATH,
63 "/sys/kernel/debug/tracing",
64 "/tracing",
65 "/trace",
66 0,
67};
68
28struct fs { 69struct fs {
29 const char *name; 70 const char *name;
30 const char * const *mounts; 71 const char * const *mounts;
31 char path[PATH_MAX + 1]; 72 char path[PATH_MAX];
32 bool found; 73 bool found;
33 long magic; 74 long magic;
34}; 75};
35 76
36enum { 77enum {
37 FS__SYSFS = 0, 78 FS__SYSFS = 0,
38 FS__PROCFS = 1, 79 FS__PROCFS = 1,
80 FS__DEBUGFS = 2,
81 FS__TRACEFS = 3,
39}; 82};
40 83
84#ifndef TRACEFS_MAGIC
85#define TRACEFS_MAGIC 0x74726163
86#endif
87
41static struct fs fs__entries[] = { 88static struct fs fs__entries[] = {
42 [FS__SYSFS] = { 89 [FS__SYSFS] = {
43 .name = "sysfs", 90 .name = "sysfs",
@@ -49,6 +96,16 @@ static struct fs fs__entries[] = {
49 .mounts = procfs__known_mountpoints, 96 .mounts = procfs__known_mountpoints,
50 .magic = PROC_SUPER_MAGIC, 97 .magic = PROC_SUPER_MAGIC,
51 }, 98 },
99 [FS__DEBUGFS] = {
100 .name = "debugfs",
101 .mounts = debugfs__known_mountpoints,
102 .magic = DEBUGFS_MAGIC,
103 },
104 [FS__TRACEFS] = {
105 .name = "tracefs",
106 .mounts = tracefs__known_mountpoints,
107 .magic = TRACEFS_MAGIC,
108 },
52}; 109};
53 110
54static bool fs__read_mounts(struct fs *fs) 111static bool fs__read_mounts(struct fs *fs)
@@ -159,14 +216,54 @@ static const char *fs__mountpoint(int idx)
159 return fs__get_mountpoint(fs); 216 return fs__get_mountpoint(fs);
160} 217}
161 218
162#define FS__MOUNTPOINT(name, idx) \ 219static const char *mount_overload(struct fs *fs)
163const char *name##__mountpoint(void) \ 220{
164{ \ 221 size_t name_len = strlen(fs->name);
165 return fs__mountpoint(idx); \ 222 /* "PERF_" + name + "_ENVIRONMENT" + '\0' */
223 char upper_name[5 + name_len + 12 + 1];
224
225 snprintf(upper_name, name_len, "PERF_%s_ENVIRONMENT", fs->name);
226 mem_toupper(upper_name, name_len);
227
228 return getenv(upper_name) ?: *fs->mounts;
229}
230
231static const char *fs__mount(int idx)
232{
233 struct fs *fs = &fs__entries[idx];
234 const char *mountpoint;
235
236 if (fs__mountpoint(idx))
237 return (const char *)fs->path;
238
239 mountpoint = mount_overload(fs);
240
241 if (mount(NULL, mountpoint, fs->name, 0, NULL) < 0)
242 return NULL;
243
244 return fs__check_mounts(fs) ? fs->path : NULL;
245}
246
247#define FS(name, idx) \
248const char *name##__mountpoint(void) \
249{ \
250 return fs__mountpoint(idx); \
251} \
252 \
253const char *name##__mount(void) \
254{ \
255 return fs__mount(idx); \
256} \
257 \
258bool name##__configured(void) \
259{ \
260 return name##__mountpoint() != NULL; \
166} 261}
167 262
168FS__MOUNTPOINT(sysfs, FS__SYSFS); 263FS(sysfs, FS__SYSFS);
169FS__MOUNTPOINT(procfs, FS__PROCFS); 264FS(procfs, FS__PROCFS);
265FS(debugfs, FS__DEBUGFS);
266FS(tracefs, FS__TRACEFS);
170 267
171int filename__read_int(const char *filename, int *value) 268int filename__read_int(const char *filename, int *value)
172{ 269{
@@ -185,6 +282,50 @@ int filename__read_int(const char *filename, int *value)
185 return err; 282 return err;
186} 283}
187 284
285int filename__read_ull(const char *filename, unsigned long long *value)
286{
287 char line[64];
288 int fd = open(filename, O_RDONLY), err = -1;
289
290 if (fd < 0)
291 return -1;
292
293 if (read(fd, line, sizeof(line)) > 0) {
294 *value = strtoull(line, NULL, 10);
295 if (*value != ULLONG_MAX)
296 err = 0;
297 }
298
299 close(fd);
300 return err;
301}
302
303int sysfs__read_ull(const char *entry, unsigned long long *value)
304{
305 char path[PATH_MAX];
306 const char *sysfs = sysfs__mountpoint();
307
308 if (!sysfs)
309 return -1;
310
311 snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
312
313 return filename__read_ull(path, value);
314}
315
316int sysfs__read_int(const char *entry, int *value)
317{
318 char path[PATH_MAX];
319 const char *sysfs = sysfs__mountpoint();
320
321 if (!sysfs)
322 return -1;
323
324 snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
325
326 return filename__read_int(path, value);
327}
328
188int sysctl__read_int(const char *sysctl, int *value) 329int sysctl__read_int(const char *sysctl, int *value)
189{ 330{
190 char path[PATH_MAX]; 331 char path[PATH_MAX];
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index 6caa2bbc6cec..d024a7f682f6 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -1,17 +1,33 @@
1#ifndef __API_FS__ 1#ifndef __API_FS__
2#define __API_FS__ 2#define __API_FS__
3 3
4#ifndef SYSFS_MAGIC 4#include <stdbool.h>
5#define SYSFS_MAGIC 0x62656572
6#endif
7 5
8#ifndef PROC_SUPER_MAGIC 6/*
9#define PROC_SUPER_MAGIC 0x9fa0 7 * On most systems <limits.h> would have given us this, but not on some systems
8 * (e.g. GNU/Hurd).
9 */
10#ifndef PATH_MAX
11#define PATH_MAX 4096
10#endif 12#endif
11 13
12const char *sysfs__mountpoint(void); 14#define FS(name) \
13const char *procfs__mountpoint(void); 15 const char *name##__mountpoint(void); \
16 const char *name##__mount(void); \
17 bool name##__configured(void); \
18
19FS(sysfs)
20FS(procfs)
21FS(debugfs)
22FS(tracefs)
23
24#undef FS
25
14 26
15int filename__read_int(const char *filename, int *value); 27int filename__read_int(const char *filename, int *value);
28int filename__read_ull(const char *filename, unsigned long long *value);
29
16int sysctl__read_int(const char *sysctl, int *value); 30int sysctl__read_int(const char *sysctl, int *value);
31int sysfs__read_int(const char *entry, int *value);
32int sysfs__read_ull(const char *entry, unsigned long long *value);
17#endif /* __API_FS__ */ 33#endif /* __API_FS__ */
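
For every filesystem listed in FS(), three functions are now generated: name__mountpoint() locates an existing mount, name__mount() additionally tries to mount the filesystem at a known location if it is missing (usually requiring root), and name__configured() reports whether a mount point was found. A usage sketch, again assuming -I tools/lib and linking libapi:

	#include <stdio.h>

	#include <api/fs/fs.h>

	int main(void)
	{
		const char *path;

		path = sysfs__mountpoint();
		printf("sysfs:   %s\n", path ?: "not mounted");

		/* Tries to mount debugfs if it is not mounted yet. */
		path = debugfs__mount();
		printf("debugfs: %s\n", path ?: "unavailable");

		printf("tracefs configured: %s\n",
		       tracefs__configured() ? "yes" : "no");
		return 0;
	}
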
diff --git a/tools/lib/api/fs/tracefs.c b/tools/lib/api/fs/tracefs.c
deleted file mode 100644
index e4aa9688b71e..000000000000
--- a/tools/lib/api/fs/tracefs.c
+++ /dev/null
@@ -1,78 +0,0 @@
1#include <errno.h>
2#include <stdio.h>
3#include <stdlib.h>
4#include <string.h>
5#include <unistd.h>
6#include <stdbool.h>
7#include <sys/vfs.h>
8#include <sys/types.h>
9#include <sys/stat.h>
10#include <sys/mount.h>
11#include <linux/kernel.h>
12
13#include "tracefs.h"
14
15#ifndef TRACEFS_DEFAULT_PATH
16#define TRACEFS_DEFAULT_PATH "/sys/kernel/tracing"
17#endif
18
19char tracefs_mountpoint[PATH_MAX + 1] = TRACEFS_DEFAULT_PATH;
20
21static const char * const tracefs_known_mountpoints[] = {
22 TRACEFS_DEFAULT_PATH,
23 "/sys/kernel/debug/tracing",
24 "/tracing",
25 "/trace",
26 0,
27};
28
29static bool tracefs_found;
30
31bool tracefs_configured(void)
32{
33 return tracefs_find_mountpoint() != NULL;
34}
35
36/* find the path to the mounted tracefs */
37const char *tracefs_find_mountpoint(void)
38{
39 const char *ret;
40
41 if (tracefs_found)
42 return (const char *)tracefs_mountpoint;
43
44 ret = find_mountpoint("tracefs", (long) TRACEFS_MAGIC,
45 tracefs_mountpoint, PATH_MAX + 1,
46 tracefs_known_mountpoints);
47
48 if (ret)
49 tracefs_found = true;
50
51 return ret;
52}
53
54/* mount the tracefs somewhere if it's not mounted */
55char *tracefs_mount(const char *mountpoint)
56{
57 /* see if it's already mounted */
58 if (tracefs_find_mountpoint())
59 goto out;
60
61 /* if not mounted and no argument */
62 if (mountpoint == NULL) {
63 /* see if environment variable set */
64 mountpoint = getenv(PERF_TRACEFS_ENVIRONMENT);
65 /* if no environment variable, use default */
66 if (mountpoint == NULL)
67 mountpoint = TRACEFS_DEFAULT_PATH;
68 }
69
70 if (mount(NULL, mountpoint, "tracefs", 0, NULL) < 0)
71 return NULL;
72
73 /* save the mountpoint */
74 tracefs_found = true;
75 strncpy(tracefs_mountpoint, mountpoint, sizeof(tracefs_mountpoint));
76out:
77 return tracefs_mountpoint;
78}
diff --git a/tools/lib/api/fs/tracefs.h b/tools/lib/api/fs/tracefs.h
deleted file mode 100644
index da780ac49acb..000000000000
--- a/tools/lib/api/fs/tracefs.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __API_TRACEFS_H__
2#define __API_TRACEFS_H__
3
4#include "findfs.h"
5
6#ifndef TRACEFS_MAGIC
7#define TRACEFS_MAGIC 0x74726163
8#endif
9
10#ifndef PERF_TRACEFS_ENVIRONMENT
11#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
12#endif
13
14bool tracefs_configured(void);
15const char *tracefs_find_mountpoint(void);
16int tracefs_valid_mountpoint(const char *debugfs);
17char *tracefs_mount(const char *mountpoint);
18
19extern char tracefs_mountpoint[];
20
21#endif /* __API_DEBUGFS_H__ */
diff --git a/tools/lib/api/fs/tracing_path.c b/tools/lib/api/fs/tracing_path.c
new file mode 100644
index 000000000000..a26bb5ea8283
--- /dev/null
+++ b/tools/lib/api/fs/tracing_path.c
@@ -0,0 +1,135 @@
1#ifndef _GNU_SOURCE
2# define _GNU_SOURCE
3#endif
4
5#include <stdio.h>
6#include <stdlib.h>
7#include <string.h>
8#include <errno.h>
9#include <unistd.h>
10#include "fs.h"
11
12#include "tracing_path.h"
13
14
15char tracing_mnt[PATH_MAX] = "/sys/kernel/debug";
16char tracing_path[PATH_MAX] = "/sys/kernel/debug/tracing";
17char tracing_events_path[PATH_MAX] = "/sys/kernel/debug/tracing/events";
18
19
20static void __tracing_path_set(const char *tracing, const char *mountpoint)
21{
22 snprintf(tracing_mnt, sizeof(tracing_mnt), "%s", mountpoint);
23 snprintf(tracing_path, sizeof(tracing_path), "%s/%s",
24 mountpoint, tracing);
25 snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s",
26 mountpoint, tracing, "events");
27}
28
29static const char *tracing_path_tracefs_mount(void)
30{
31 const char *mnt;
32
33 mnt = tracefs__mount();
34 if (!mnt)
35 return NULL;
36
37 __tracing_path_set("", mnt);
38
39 return mnt;
40}
41
42static const char *tracing_path_debugfs_mount(void)
43{
44 const char *mnt;
45
46 mnt = debugfs__mount();
47 if (!mnt)
48 return NULL;
49
50 __tracing_path_set("tracing/", mnt);
51
52 return mnt;
53}
54
55const char *tracing_path_mount(void)
56{
57 const char *mnt;
58
59 mnt = tracing_path_tracefs_mount();
60 if (mnt)
61 return mnt;
62
63 mnt = tracing_path_debugfs_mount();
64
65 return mnt;
66}
67
68void tracing_path_set(const char *mntpt)
69{
70 __tracing_path_set("tracing/", mntpt);
71}
72
73char *get_tracing_file(const char *name)
74{
75 char *file;
76
77 if (asprintf(&file, "%s/%s", tracing_path, name) < 0)
78 return NULL;
79
80 return file;
81}
82
83void put_tracing_file(char *file)
84{
85 free(file);
86}
87
88static int strerror_open(int err, char *buf, size_t size, const char *filename)
89{
90 char sbuf[128];
91
92 switch (err) {
93 case ENOENT:
94 /*
95 * We will get here if we can't find the tracepoint, but one of
96 * debugfs or tracefs is configured, which means you probably
97 * want some tracepoint which wasn't compiled in your kernel.
98 * - jirka
99 */
100 if (debugfs__configured() || tracefs__configured()) {
101 snprintf(buf, size,
102 "Error:\tFile %s/%s not found.\n"
103				 "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?\n",
104 tracing_events_path, filename);
105 break;
106 }
107 snprintf(buf, size, "%s",
108 "Error:\tUnable to find debugfs/tracefs\n"
109 "Hint:\tWas your kernel compiled with debugfs/tracefs support?\n"
110 "Hint:\tIs the debugfs/tracefs filesystem mounted?\n"
111 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
112 break;
113 case EACCES: {
114 snprintf(buf, size,
115 "Error:\tNo permissions to read %s/%s\n"
116 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
117 tracing_events_path, filename, tracing_mnt);
118 }
119 break;
120 default:
121 snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
122 break;
123 }
124
125 return 0;
126}
127
128int tracing_path__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
129{
130 char path[PATH_MAX];
131
132 snprintf(path, PATH_MAX, "%s/%s", sys, name ?: "*");
133
134 return strerror_open(err, buf, size, path);
135}
diff --git a/tools/lib/api/fs/tracing_path.h b/tools/lib/api/fs/tracing_path.h
new file mode 100644
index 000000000000..3f233ac70b6f
--- /dev/null
+++ b/tools/lib/api/fs/tracing_path.h
@@ -0,0 +1,16 @@
1#ifndef __API_FS_TRACING_PATH_H
2#define __API_FS_TRACING_PATH_H
3
4#include <linux/types.h>
5
6extern char tracing_path[];
7extern char tracing_events_path[];
8
9void tracing_path_set(const char *mountpoint);
10const char *tracing_path_mount(void);
11
12char *get_tracing_file(const char *name);
13void put_tracing_file(char *file);
14
15int tracing_path__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
16#endif /* __API_FS_TRACING_PATH_H */
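
tracing_path_mount() replaces the open-coded debugfs/tracefs juggling that the removed debugfs.c and tracefs.c used to do: it prefers tracefs and falls back to the tracing directory under debugfs. A usage sketch with the same include and link assumptions as above:

	#include <stdio.h>

	#include <api/fs/tracing_path.h>

	int main(void)
	{
		char *file;

		if (!tracing_path_mount()) {
			fprintf(stderr, "no tracefs/debugfs available\n");
			return 1;
		}

		file = get_tracing_file("available_events");
		if (!file)
			return 1;

		printf("event list lives in: %s\n", file);
		put_tracing_file(file);
		return 0;
	}
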
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index f68d23a0b487..fc9af57b666e 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -64,8 +64,9 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
64#$(info Determined 'srctree' to be $(srctree)) 64#$(info Determined 'srctree' to be $(srctree))
65endif 65endif
66 66
67FEATURE_DISPLAY = libelf libelf-getphdrnum libelf-mmap bpf 67FEATURE_USER = .libbpf
68FEATURE_TESTS = libelf bpf 68FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
69FEATURE_DISPLAY = libelf bpf
69 70
70INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi 71INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
71FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES) 72FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
@@ -122,8 +123,10 @@ endif
122# the same command line setup. 123# the same command line setup.
123MAKEOVERRIDES= 124MAKEOVERRIDES=
124 125
126all:
127
125export srctree OUTPUT CC LD CFLAGS V 128export srctree OUTPUT CC LD CFLAGS V
126build := -f $(srctree)/tools/build/Makefile.build dir=. obj 129include $(srctree)/tools/build/Makefile.include
127 130
128BPF_IN := $(OUTPUT)libbpf-in.o 131BPF_IN := $(OUTPUT)libbpf-in.o
129LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) 132LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
@@ -132,7 +135,7 @@ CMD_TARGETS = $(LIB_FILE)
132 135
133TARGETS = $(CMD_TARGETS) 136TARGETS = $(CMD_TARGETS)
134 137
135all: $(VERSION_FILES) all_cmd 138all: fixdep $(VERSION_FILES) all_cmd
136 139
137all_cmd: $(CMD_TARGETS) 140all_cmd: $(CMD_TARGETS)
138 141
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 18ffccf00426..7e319afac78a 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -93,8 +93,10 @@ else
93 print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2'; 93 print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
94endif 94endif
95 95
96all:
97
96export srctree OUTPUT CC LD CFLAGS V 98export srctree OUTPUT CC LD CFLAGS V
97build := -f $(srctree)/tools/build/Makefile.build dir=. obj 99include $(srctree)/tools/build/Makefile.include
98 100
99do_compile_shared_library = \ 101do_compile_shared_library = \
100 ($(print_shared_lib_compile) \ 102 ($(print_shared_lib_compile) \
@@ -109,7 +111,7 @@ CMD_TARGETS = $(LIB_FILE)
109TARGETS = $(CMD_TARGETS) 111TARGETS = $(CMD_TARGETS)
110 112
111 113
112all: all_cmd 114all: fixdep all_cmd
113 115
114all_cmd: $(CMD_TARGETS) 116all_cmd: $(CMD_TARGETS)
115 117
diff --git a/tools/lib/symbol/kallsyms.c b/tools/lib/symbol/kallsyms.c
index 18bc271a4bbc..5e431077fcd6 100644
--- a/tools/lib/symbol/kallsyms.c
+++ b/tools/lib/symbol/kallsyms.c
@@ -2,6 +2,12 @@
2#include <stdio.h> 2#include <stdio.h>
3#include <stdlib.h> 3#include <stdlib.h>
4 4
5u8 kallsyms2elf_type(char type)
6{
7 type = tolower(type);
8 return (type == 't' || type == 'w') ? STT_FUNC : STT_OBJECT;
9}
10
5int kallsyms__parse(const char *filename, void *arg, 11int kallsyms__parse(const char *filename, void *arg,
6 int (*process_symbol)(void *arg, const char *name, 12 int (*process_symbol)(void *arg, const char *name,
7 char type, u64 start)) 13 char type, u64 start))
diff --git a/tools/lib/symbol/kallsyms.h b/tools/lib/symbol/kallsyms.h
index 6084f5e18b3c..4071316a766e 100644
--- a/tools/lib/symbol/kallsyms.h
+++ b/tools/lib/symbol/kallsyms.h
@@ -9,7 +9,7 @@
9#define KSYM_NAME_LEN 256 9#define KSYM_NAME_LEN 256
10#endif 10#endif
11 11
12static inline u8 kallsyms2elf_type(char type) 12static inline u8 kallsyms2elf_binding(char type)
13{ 13{
14 if (type == 'W') 14 if (type == 'W')
15 return STB_WEAK; 15 return STB_WEAK;
@@ -17,6 +17,8 @@ static inline u8 kallsyms2elf_type(char type)
17 return isupper(type) ? STB_GLOBAL : STB_LOCAL; 17 return isupper(type) ? STB_GLOBAL : STB_LOCAL;
18} 18}
19 19
20u8 kallsyms2elf_type(char type);
21
20int kallsyms__parse(const char *filename, void *arg, 22int kallsyms__parse(const char *filename, void *arg,
21 int (*process_symbol)(void *arg, const char *name, 23 int (*process_symbol)(void *arg, const char *name,
22 char type, u64 start)); 24 char type, u64 start));
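
After this change, kallsyms2elf_binding() still maps the symbol letter's case to STB_GLOBAL/STB_LOCAL (with 'W' as STB_WEAK), while the new out-of-line kallsyms2elf_type() maps the lower-cased letter to STT_FUNC ('t'/'w') or STT_OBJECT. A standalone restatement of that mapping, written so it does not depend on the tools headers (helper names here are illustrative):

	#include <ctype.h>
	#include <elf.h>
	#include <stdio.h>

	static unsigned int symchar2elf_binding(char type)
	{
		if (type == 'W')
			return STB_WEAK;
		return isupper(type) ? STB_GLOBAL : STB_LOCAL;
	}

	static unsigned int symchar2elf_type(char type)
	{
		type = tolower(type);
		return (type == 't' || type == 'w') ? STT_FUNC : STT_OBJECT;
	}

	int main(void)
	{
		const char types[] = { 'T', 't', 'W', 'D', 'b' };
		unsigned int i;

		for (i = 0; i < sizeof(types); i++)
			printf("'%c' -> binding %u, type %u\n", types[i],
			       symchar2elf_binding(types[i]),
			       symchar2elf_type(types[i]));
		return 0;
	}
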
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index cf42b090477b..2a912df6771b 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -848,6 +848,7 @@ static void free_arg(struct print_arg *arg)
848 free(arg->bitmask.bitmask); 848 free(arg->bitmask.bitmask);
849 break; 849 break;
850 case PRINT_DYNAMIC_ARRAY: 850 case PRINT_DYNAMIC_ARRAY:
851 case PRINT_DYNAMIC_ARRAY_LEN:
851 free(arg->dynarray.index); 852 free(arg->dynarray.index);
852 break; 853 break;
853 case PRINT_OP: 854 case PRINT_OP:
@@ -2729,6 +2730,42 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
2729} 2730}
2730 2731
2731static enum event_type 2732static enum event_type
2733process_dynamic_array_len(struct event_format *event, struct print_arg *arg,
2734 char **tok)
2735{
2736 struct format_field *field;
2737 enum event_type type;
2738 char *token;
2739
2740 if (read_expect_type(EVENT_ITEM, &token) < 0)
2741 goto out_free;
2742
2743 arg->type = PRINT_DYNAMIC_ARRAY_LEN;
2744
2745 /* Find the field */
2746 field = pevent_find_field(event, token);
2747 if (!field)
2748 goto out_free;
2749
2750 arg->dynarray.field = field;
2751 arg->dynarray.index = 0;
2752
2753 if (read_expected(EVENT_DELIM, ")") < 0)
2754 goto out_err;
2755
2756 type = read_token(&token);
2757 *tok = token;
2758
2759 return type;
2760
2761 out_free:
2762 free_token(token);
2763 out_err:
2764 *tok = NULL;
2765 return EVENT_ERROR;
2766}
2767
2768static enum event_type
2732process_paren(struct event_format *event, struct print_arg *arg, char **tok) 2769process_paren(struct event_format *event, struct print_arg *arg, char **tok)
2733{ 2770{
2734 struct print_arg *item_arg; 2771 struct print_arg *item_arg;
@@ -2975,6 +3012,10 @@ process_function(struct event_format *event, struct print_arg *arg,
2975 free_token(token); 3012 free_token(token);
2976 return process_dynamic_array(event, arg, tok); 3013 return process_dynamic_array(event, arg, tok);
2977 } 3014 }
3015 if (strcmp(token, "__get_dynamic_array_len") == 0) {
3016 free_token(token);
3017 return process_dynamic_array_len(event, arg, tok);
3018 }
2978 3019
2979 func = find_func_handler(event->pevent, token); 3020 func = find_func_handler(event->pevent, token);
2980 if (func) { 3021 if (func) {
@@ -3655,14 +3696,25 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
3655 goto out_warning_op; 3696 goto out_warning_op;
3656 } 3697 }
3657 break; 3698 break;
3699 case PRINT_DYNAMIC_ARRAY_LEN:
3700 offset = pevent_read_number(pevent,
3701 data + arg->dynarray.field->offset,
3702 arg->dynarray.field->size);
3703 /*
3704 * The total allocated length of the dynamic array is
3705 * stored in the top half of the field, and the offset
3706 * is in the bottom half of the 32 bit field.
3707 */
3708 val = (unsigned long long)(offset >> 16);
3709 break;
3658 case PRINT_DYNAMIC_ARRAY: 3710 case PRINT_DYNAMIC_ARRAY:
3659 /* Without [], we pass the address to the dynamic data */ 3711 /* Without [], we pass the address to the dynamic data */
3660 offset = pevent_read_number(pevent, 3712 offset = pevent_read_number(pevent,
3661 data + arg->dynarray.field->offset, 3713 data + arg->dynarray.field->offset,
3662 arg->dynarray.field->size); 3714 arg->dynarray.field->size);
3663 /* 3715 /*
3664 * The actual length of the dynamic array is stored 3716 * The total allocated length of the dynamic array is
3665 * in the top half of the field, and the offset 3717 * stored in the top half of the field, and the offset
3666 * is in the bottom half of the 32 bit field. 3718 * is in the bottom half of the 32 bit field.
3667 */ 3719 */
3668 offset &= 0xffff; 3720 offset &= 0xffff;
@@ -4853,8 +4905,8 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
4853 else 4905 else
4854 ls = 2; 4906 ls = 2;
4855 4907
4856 if (*(ptr+1) == 'F' || 4908 if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
4857 *(ptr+1) == 'f') { 4909 *(ptr+1) == 'S' || *(ptr+1) == 's') {
4858 ptr++; 4910 ptr++;
4859 show_func = *ptr; 4911 show_func = *ptr;
4860 } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') { 4912 } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
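
The new __get_dynamic_array_len() handling relies on the same packed descriptor that PRINT_DYNAMIC_ARRAY already used: the 32-bit __data_loc field stores the allocated length in its top 16 bits and the offset of the data within the record in its bottom 16 bits. A standalone sketch of that decoding (names are illustrative, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Split a __data_loc style descriptor into offset and length. */
	static void decode_data_loc(uint32_t desc,
				    unsigned int *offset, unsigned int *len)
	{
		*offset = desc & 0xffff;	/* bottom half: offset into the record */
		*len    = desc >> 16;		/* top half: allocated length in bytes */
	}

	int main(void)
	{
		unsigned int offset, len;

		/* e.g. 24 bytes of data starting 64 bytes into the record */
		decode_data_loc((24u << 16) | 64u, &offset, &len);
		printf("offset=%u len=%u\n", offset, len);
		return 0;
	}
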
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 204befb05a17..6fc83c7edbe9 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -294,6 +294,7 @@ enum print_arg_type {
294 PRINT_OP, 294 PRINT_OP,
295 PRINT_FUNC, 295 PRINT_FUNC,
296 PRINT_BITMASK, 296 PRINT_BITMASK,
297 PRINT_DYNAMIC_ARRAY_LEN,
297}; 298};
298 299
299struct print_arg { 300struct print_arg {
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 88fe83dff7cd..18536f756577 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -124,7 +124,10 @@ static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
124 _ER(WBINVD, 54) \ 124 _ER(WBINVD, 54) \
125 _ER(XSETBV, 55) \ 125 _ER(XSETBV, 55) \
126 _ER(APIC_WRITE, 56) \ 126 _ER(APIC_WRITE, 56) \
127 _ER(INVPCID, 58) 127 _ER(INVPCID, 58) \
128 _ER(PML_FULL, 62) \
129 _ER(XSAVES, 63) \
130 _ER(XRSTORS, 64)
128 131
129#define SVM_EXIT_REASONS \ 132#define SVM_EXIT_REASONS \
130 _ER(EXIT_READ_CR0, 0x000) \ 133 _ER(EXIT_READ_CR0, 0x000) \
@@ -352,15 +355,18 @@ static int kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *
352union kvm_mmu_page_role { 355union kvm_mmu_page_role {
353 unsigned word; 356 unsigned word;
354 struct { 357 struct {
355 unsigned glevels:4;
356 unsigned level:4; 358 unsigned level:4;
359 unsigned cr4_pae:1;
357 unsigned quadrant:2; 360 unsigned quadrant:2;
358 unsigned pad_for_nice_hex_output:6;
359 unsigned direct:1; 361 unsigned direct:1;
360 unsigned access:3; 362 unsigned access:3;
361 unsigned invalid:1; 363 unsigned invalid:1;
362 unsigned cr4_pge:1;
363 unsigned nxe:1; 364 unsigned nxe:1;
365 unsigned cr0_wp:1;
366 unsigned smep_and_not_wp:1;
367 unsigned smap_and_not_wp:1;
368 unsigned pad_for_nice_hex_output:8;
369 unsigned smm:8;
364 }; 370 };
365}; 371};
366 372
@@ -385,15 +391,18 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
385 if (pevent_is_file_bigendian(event->pevent) == 391 if (pevent_is_file_bigendian(event->pevent) ==
386 pevent_is_host_bigendian(event->pevent)) { 392 pevent_is_host_bigendian(event->pevent)) {
387 393
388 trace_seq_printf(s, "%u/%u q%u%s %s%s %spge %snxe", 394 trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
389 role.level, 395 role.level,
390 role.glevels,
391 role.quadrant, 396 role.quadrant,
392 role.direct ? " direct" : "", 397 role.direct ? " direct" : "",
393 access_str[role.access], 398 access_str[role.access],
394 role.invalid ? " invalid" : "", 399 role.invalid ? " invalid" : "",
395 role.cr4_pge ? "" : "!", 400 role.cr4_pae ? "" : "!",
396 role.nxe ? "" : "!"); 401 role.nxe ? "" : "!",
402 role.cr0_wp ? "" : "!",
403 role.smep_and_not_wp ? " smep" : "",
404 role.smap_and_not_wp ? " smap" : "",
405 role.smm ? " smm" : "");
397 } else 406 } else
398 trace_seq_printf(s, "WORD: %08x", role.word); 407 trace_seq_printf(s, "WORD: %08x", role.word);
399 408
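
The reworked union tracks the kernel's current kvm_mmu_page_role layout, so decoding role.word through bitfields matches what the tracepoint records; when file and host endianness differ, the plugin still falls back to printing the raw word. For illustration only, a trimmed standalone copy of the union and the kind of decoding the plugin performs (bitfield layout is compiler and endian dependent, which is exactly why that fallback exists):

	#include <stdio.h>

	/* Trimmed copy of the union used by the plugin, illustration only. */
	union mmu_page_role {
		unsigned word;
		struct {
			unsigned level:4;
			unsigned cr4_pae:1;
			unsigned quadrant:2;
			unsigned direct:1;
			unsigned access:3;
			unsigned invalid:1;
			unsigned nxe:1;
			unsigned cr0_wp:1;
			unsigned smep_and_not_wp:1;
			unsigned smap_and_not_wp:1;
			unsigned pad_for_nice_hex_output:8;
			unsigned smm:8;
		};
	};

	int main(void)
	{
		union mmu_page_role role = { .word = 0 };

		role.level = 4;
		role.cr4_pae = 1;
		role.access = 7;

		printf("word=%08x level=%u pae=%u access=%u\n",
		       role.word, role.level, role.cr4_pae, role.access);
		return 0;
	}
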
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index c94c9de3173e..be764f9ec769 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -671,6 +671,7 @@ The letters are:
671 e synthesize tracing error events 671 e synthesize tracing error events
672 d create a debug log 672 d create a debug log
673 g synthesize a call chain (use with i or x) 673 g synthesize a call chain (use with i or x)
674 l synthesize last branch entries (use with i or x)
674 675
675"Instructions" events look like they were recorded by "perf record -e 676"Instructions" events look like they were recorded by "perf record -e
676instructions". 677instructions".
@@ -707,12 +708,26 @@ on the sample is *not* adjusted and reflects the last known value of TSC.
707 708
708For Intel PT, the default period is 100us. 709For Intel PT, the default period is 100us.
709 710
711Setting it to a zero period means "as often as possible".
712
713In the case of Intel PT that is the same as a period of 1 and a unit of
714'instructions' (i.e. --itrace=i1i).
715
710Also the call chain size (default 16, max. 1024) for instructions or 716Also the call chain size (default 16, max. 1024) for instructions or
711transactions events can be specified. e.g. 717transactions events can be specified. e.g.
712 718
713 --itrace=ig32 719 --itrace=ig32
714 --itrace=xg32 720 --itrace=xg32
715 721
722Also the number of last branch entries (default 64, max. 1024) for instructions or
723transactions events can be specified. e.g.
724
725 --itrace=il10
726 --itrace=xl10
727
728Note that last branch entries are cleared for each sample, so there is no overlap
729from one sample to the next.
730
716To disable trace decoding entirely, use the option --no-itrace. 731To disable trace decoding entirely, use the option --no-itrace.
717 732
718 733
@@ -749,3 +764,32 @@ perf inject also accepts the --itrace option in which case tracing data is
749removed and replaced with the synthesized events. e.g. 764removed and replaced with the synthesized events. e.g.
750 765
751 perf inject --itrace -i perf.data -o perf.data.new 766 perf inject --itrace -i perf.data -o perf.data.new
767
768Below is an example of using Intel PT with autofdo. It requires autofdo
769(https://github.com/google/autofdo) and gcc version 5. The bubble
770sort example is from the AutoFDO tutorial (https://gcc.gnu.org/wiki/AutoFDO/Tutorial)
771amended to take the number of elements as a parameter.
772
773 $ gcc-5 -O3 sort.c -o sort_optimized
774 $ ./sort_optimized 30000
775 Bubble sorting array of 30000 elements
776 2254 ms
777
778 $ cat ~/.perfconfig
779 [intel-pt]
780 mispred-all
781
782 $ perf record -e intel_pt//u ./sort 3000
783 Bubble sorting array of 3000 elements
784 58 ms
785 [ perf record: Woken up 2 times to write data ]
786 [ perf record: Captured and wrote 3.939 MB perf.data ]
787 $ perf inject -i perf.data -o inj --itrace=i100usle --strip
788 $ ./create_gcov --binary=./sort --profile=inj --gcov=sort.gcov -gcov_version=1
789 $ gcc-5 -O3 -fauto-profile=sort.gcov sort.c -o sort_autofdo
790 $ ./sort_autofdo 30000
791 Bubble sorting array of 30000 elements
792 2155 ms
793
794Note there is currently no advantage to using Intel PT instead of LBR, but
795that may change in the future if greater use is made of the data.
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index 2ff946677e3b..65453f4c7006 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -6,6 +6,7 @@
6 e synthesize error events 6 e synthesize error events
7 d create a debug log 7 d create a debug log
8 g synthesize a call chain (use with i or x) 8 g synthesize a call chain (use with i or x)
9 l synthesize last branch entries (use with i or x)
9 10
10 The default is all events i.e. the same as --itrace=ibxe 11 The default is all events i.e. the same as --itrace=ibxe
11 12
@@ -20,3 +21,6 @@
20 21
21 Also the call chain size (default 16, max. 1024) for instructions or 22 Also the call chain size (default 16, max. 1024) for instructions or
22 transactions events can be specified. 23 transactions events can be specified.
24
25 Also the number of last branch entries (default 64, max. 1024) for
26 instructions or transactions events can be specified.
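As an illustrative combination of the itrace options above (the value 128 is an arbitrary choice within the documented maximum of 1024), instruction samples with a larger last branch stack could be requested with:

    $ perf report --itrace=il128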
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index ab632d9fbd7d..34750fc32714 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -82,7 +82,7 @@ Be multi thread instead of multi process
82Specify number of groups 82Specify number of groups
83 83
84-l:: 84-l::
85--loop=:: 85--nr_loops=::
86Specify number of loops 86Specify number of loops
87 87
88Example of *messaging* 88Example of *messaging*
@@ -139,64 +139,48 @@ Suite for evaluating performance of simple memory copy in various ways.
139Options of *memcpy* 139Options of *memcpy*
140^^^^^^^^^^^^^^^^^^^ 140^^^^^^^^^^^^^^^^^^^
141-l:: 141-l::
142--length:: 142--size::
143Specify length of memory to copy (default: 1MB). 143Specify size of memory to copy (default: 1MB).
144Available units are B, KB, MB, GB and TB (case insensitive). 144Available units are B, KB, MB, GB and TB (case insensitive).
145 145
146-r:: 146-f::
147--routine:: 147--function::
148Specify routine to copy (default: default). 148Specify function to copy (default: default).
149Available routines are depend on the architecture. 149Available functions depend on the architecture.
150On x86-64, x86-64-unrolled, x86-64-movsq and x86-64-movsb are supported. 150On x86-64, x86-64-unrolled, x86-64-movsq and x86-64-movsb are supported.
151 151
152-i:: 152-l::
153--iterations:: 153--nr_loops::
154Repeat memcpy invocation this number of times. 154Repeat memcpy invocation this number of times.
155 155
156-c:: 156-c::
157--cycle:: 157--cycles::
158Use perf's cpu-cycles event instead of gettimeofday syscall. 158Use perf's cpu-cycles event instead of gettimeofday syscall.
159 159
160-o::
161--only-prefault::
162Show only the result with page faults before memcpy.
163
164-n::
165--no-prefault::
166Show only the result without page faults before memcpy.
167
168*memset*:: 160*memset*::
169Suite for evaluating performance of simple memory set in various ways. 161Suite for evaluating performance of simple memory set in various ways.
170 162
171Options of *memset* 163Options of *memset*
172^^^^^^^^^^^^^^^^^^^ 164^^^^^^^^^^^^^^^^^^^
173-l:: 165-l::
174--length:: 166--size::
175Specify length of memory to set (default: 1MB). 167Specify size of memory to set (default: 1MB).
176Available units are B, KB, MB, GB and TB (case insensitive). 168Available units are B, KB, MB, GB and TB (case insensitive).
177 169
178-r:: 170-f::
179--routine:: 171--function::
180Specify routine to set (default: default). 172Specify function to set (default: default).
181Available routines are depend on the architecture. 173Available functions depend on the architecture.
182On x86-64, x86-64-unrolled, x86-64-stosq and x86-64-stosb are supported. 174On x86-64, x86-64-unrolled, x86-64-stosq and x86-64-stosb are supported.
183 175
184-i:: 176-l::
185--iterations:: 177--nr_loops::
186Repeat memset invocation this number of times. 178Repeat memset invocation this number of times.
187 179
188-c:: 180-c::
189--cycle:: 181--cycles::
190Use perf's cpu-cycles event instead of gettimeofday syscall. 182Use perf's cpu-cycles event instead of gettimeofday syscall.
191 183
192-o::
193--only-prefault::
194Show only the result with page faults before memset.
195
196-n::
197--no-prefault::
198Show only the result without page faults before memset.
199
200SUITES FOR 'numa' 184SUITES FOR 'numa'
201~~~~~~~~~~~~~~~~~ 185~~~~~~~~~~~~~~~~~
202*mem*:: 186*mem*::
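A hypothetical invocation exercising the renamed memcpy options documented above (the values are examples only, not taken from the patch) might look like:

    $ perf bench mem memcpy --size 1MB --function default --nr_loops 10 --cycles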
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index 0c721c3e37e1..0b1cedeef895 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -50,6 +50,9 @@ OPTIONS
50 50
51include::itrace.txt[] 51include::itrace.txt[]
52 52
53--strip::
54 Use with --itrace to strip out non-synthesized events.
55
53SEE ALSO 56SEE ALSO
54-------- 57--------
55linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1] 58linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
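For reference, combining the new --strip option with --itrace, as also shown in the Intel PT documentation above, would look like:

    $ perf inject --itrace --strip -i perf.data -o perf.data.new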
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index bada8933fdd4..79483f40e991 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -30,6 +30,7 @@ counted. The following modifiers exist:
30 G - guest counting (in KVM guests) 30 G - guest counting (in KVM guests)
31 H - host counting (not in KVM guests) 31 H - host counting (not in KVM guests)
32 p - precise level 32 p - precise level
33 P - use maximum detected precise level
33 S - read sample value (PERF_SAMPLE_READ) 34 S - read sample value (PERF_SAMPLE_READ)
34 D - pin the event to the PMU 35 D - pin the event to the PMU
35 36
@@ -125,6 +126,8 @@ To limit the list use:
125. If none of the above is matched, it will apply the supplied glob to all 126. If none of the above is matched, it will apply the supplied glob to all
126 events, printing the ones that match. 127 events, printing the ones that match.
127 128
129. As a last resort, it will do a substring search in all event names.
130
128One or more types can be used at the same time, listing the events for the 131One or more types can be used at the same time, listing the events for the
129types specified. 132types specified.
130 133
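A minimal sketch of the new 'P' modifier in use (the cycles event is an arbitrary choice):

    $ perf record -e cycles:P -a sleep 1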
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 2e9ce77b5e14..e630a7d2c348 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -144,7 +144,7 @@ OPTIONS
144 144
145--call-graph:: 145--call-graph::
146 Setup and enable call-graph (stack chain/backtrace) recording, 146 Setup and enable call-graph (stack chain/backtrace) recording,
147 implies -g. 147 implies -g. Default is "fp".
148 148
149 Allows specifying "fp" (frame pointer) or "dwarf" 149 Allows specifying "fp" (frame pointer) or "dwarf"
150 (DWARF's CFI - Call Frame Information) or "lbr" 150 (DWARF's CFI - Call Frame Information) or "lbr"
@@ -154,13 +154,18 @@ OPTIONS
154 In some systems, where binaries are built with gcc 154 In some systems, where binaries are built with gcc
155 -fomit-frame-pointer, using the "fp" method will produce bogus 155 -fomit-frame-pointer, using the "fp" method will produce bogus
156 call graphs; in that case "dwarf", if available (perf tools linked to 156 call graphs; in that case "dwarf", if available (perf tools linked to
157 the libunwind library) should be used instead. 157 the libunwind or libdw library) should be used instead.
158 Using the "lbr" method doesn't require any compiler options. It 158 Using the "lbr" method doesn't require any compiler options. It
159 will produce call graphs from the hardware LBR registers. The 159 will produce call graphs from the hardware LBR registers. The
160 main limitation is that it is only available on new Intel 160 main limitation is that it is only available on new Intel
161 platforms, such as Haswell. It can only get the user call chain. It 161 platforms, such as Haswell. It can only get the user call chain. It
162 doesn't work with branch stack sampling at the same time. 162 doesn't work with branch stack sampling at the same time.
163 163
164 When "dwarf" recording is used, perf also records (user) stack dump
165 when sampled. Default size of the stack dump is 8192 (bytes).
166 User can change the size by passing the size after comma like
167 "--call-graph dwarf,4096".
168
164-q:: 169-q::
165--quiet:: 170--quiet::
166 Don't print any message, useful for scripting. 171 Don't print any message, useful for scripting.
@@ -236,6 +241,7 @@ following filters are defined:
236 - any_call: any function call or system call 241 - any_call: any function call or system call
237 - any_ret: any function return or system call return 242 - any_ret: any function return or system call return
238 - ind_call: any indirect branch 243 - ind_call: any indirect branch
244 - call: direct calls, including far (to/from kernel) calls
239 - u: only when the branch target is at the user level 245 - u: only when the branch target is at the user level
240 - k: only when the branch target is in the kernel 246 - k: only when the branch target is in the kernel
241 - hv: only when the target is at the hypervisor level 247 - hv: only when the target is at the hypervisor level
@@ -308,6 +314,12 @@ This option sets the time out limit. The default value is 500 ms.
308Record context switch events i.e. events of type PERF_RECORD_SWITCH or 314Record context switch events i.e. events of type PERF_RECORD_SWITCH or
309PERF_RECORD_SWITCH_CPU_WIDE. 315PERF_RECORD_SWITCH_CPU_WIDE.
310 316
317--clang-path::
318Path to clang binary to use for compiling BPF scriptlets.
319
320--clang-opt::
321Options passed to clang when compiling BPF scriptlets.
322
311SEE ALSO 323SEE ALSO
312-------- 324--------
313linkperf:perf-stat[1], linkperf:perf-list[1] 325linkperf:perf-stat[1], linkperf:perf-list[1]
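As an illustration of the dwarf stack-dump size syntax documented above (./my_workload is a placeholder and 4096 is just an example size):

    $ perf record --call-graph dwarf,4096 -- ./my_workload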
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 9c7981bfddad..5ce8da1e1256 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -29,7 +29,7 @@ OPTIONS
29--show-nr-samples:: 29--show-nr-samples::
30 Show the number of samples for each symbol 30 Show the number of samples for each symbol
31 31
32--showcpuutilization:: 32--show-cpu-utilization::
33 Show sample percentage for different cpu modes. 33 Show sample percentage for different cpu modes.
34 34
35-T:: 35-T::
@@ -68,7 +68,7 @@ OPTIONS
68--sort=:: 68--sort=::
69 Sort histogram entries by given key(s) - multiple keys can be specified 69 Sort histogram entries by given key(s) - multiple keys can be specified
70 in CSV format. Following sort keys are available: 70 in CSV format. Following sort keys are available:
71 pid, comm, dso, symbol, parent, cpu, srcline, weight, local_weight. 71 pid, comm, dso, symbol, parent, cpu, socket, srcline, weight, local_weight.
72 72
73 Each key has following meaning: 73 Each key has following meaning:
74 74
@@ -79,6 +79,7 @@ OPTIONS
79 - parent: name of function matched to the parent regex filter. Unmatched 79 - parent: name of function matched to the parent regex filter. Unmatched
80 entries are displayed as "[other]". 80 entries are displayed as "[other]".
81 - cpu: cpu number the task ran at the time of sample 81 - cpu: cpu number the task ran at the time of sample
82 - socket: processor socket number the task ran at the time of sample
82 - srcline: filename and line number executed at the time of sample. The 83 - srcline: filename and line number executed at the time of sample. The
83 DWARF debugging info must be provided. 84 DWARF debugging info must be provided.
84 - srcfile: file name of the source file of the same. Requires dwarf 85 - srcfile: file name of the source file of the same. Requires dwarf
@@ -168,30 +169,40 @@ OPTIONS
168--dump-raw-trace:: 169--dump-raw-trace::
169 Dump raw trace in ASCII. 170 Dump raw trace in ASCII.
170 171
171-g [type,min[,limit],order[,key][,branch]]:: 172-g::
172--call-graph:: 173--call-graph=<print_type,threshold[,print_limit],order,sort_key,branch>::
173 Display call chains using type, min percent threshold, optional print 174 Display call chains using type, min percent threshold, print limit,
174 limit and order. 175 call order, sort key and branch. Note that the ordering of parameters is not
175 type can be either: 176 fixed, so any parameter can be given in an arbitrary order. One exception
177 is print_limit, which should be preceded by threshold.
178
179 print_type can be either:
176 - flat: single column, linear exposure of call chains. 180 - flat: single column, linear exposure of call chains.
177 - graph: use a graph tree, displaying absolute overhead rates. 181 - graph: use a graph tree, displaying absolute overhead rates. (default)
178 - fractal: like graph, but displays relative rates. Each branch of 182 - fractal: like graph, but displays relative rates. Each branch of
179 the tree is considered as a new profiled object. + 183 the tree is considered as a new profiled object.
184 - none: disable call chain display.
185
186 threshold is a percentage value which specifies a minimum percent to be
187 included in the output call graph. Default is 0.5 (%).
188
189 print_limit is only applied when the stdio interface is used. It limits
190 the number of call graph entries in a single hist entry. Note that it needs
191 to be given after threshold (but not necessarily consecutively).
192 Default is 0 (unlimited).
180 193
181 order can be either: 194 order can be either:
182 - callee: callee based call graph. 195 - callee: callee based call graph.
183 - caller: inverted caller based call graph. 196 - caller: inverted caller based call graph.
197 Default is 'caller' when --children is used, otherwise 'callee'.
184 198
185 key can be: 199 sort_key can be:
186 - function: compare on functions 200 - function: compare on functions (default)
187 - address: compare on individual code addresses 201 - address: compare on individual code addresses
188 202
189 branch can be: 203 branch can be:
190 - branch: include last branch information in callgraph 204 - branch: include last branch information in callgraph when available.
191 when available. Usually more convenient to use --branch-history 205 Usually more convenient to use --branch-history for this.
192 for this.
193
194 Default: fractal,0.5,callee,function.
195 206
196--children:: 207--children::
197 Accumulate callchain of children to parent entry so that they can 208 Accumulate callchain of children to parent entry so that they can
@@ -204,6 +215,8 @@ OPTIONS
204 beyond the specified depth will be ignored. This is a trade-off 215 beyond the specified depth will be ignored. This is a trade-off
205 between information loss and faster processing especially for 216 between information loss and faster processing especially for
206 workloads that can have a very long callchain stack. 217 workloads that can have a very long callchain stack.
218 Note that when using the --itrace option the synthesized callchain size
219 will override this value if the synthesized callchain size is bigger.
207 220
208 Default: 127 221 Default: 127
209 222
@@ -349,6 +362,9 @@ include::itrace.txt[]
349 This option extends the perf report to show reference callgraphs, 362 This option extends the perf report to show reference callgraphs,
350 which are collected by the reference event, in no callgraph event. 363 which are collected by the reference event, in no callgraph event.
351 364
365--socket-filter::
366 Only report the samples on the processor socket that matches this filter.
367
352include::callchain-overhead-calculation.txt[] 368include::callchain-overhead-calculation.txt[]
353 369
354SEE ALSO 370SEE ALSO
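To illustrate the reworked --call-graph syntax and the new socket support (parameter values are examples only; per the text above, the parameters may be given in any order as long as print_limit follows threshold):

    $ perf report -g graph,0.5,caller,function,branch
    $ perf report --socket-filter 0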
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index dc3ec783b7bd..382ddfb45d1d 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -112,11 +112,11 @@ OPTIONS
112--debug-mode:: 112--debug-mode::
113 Do various checks like samples ordering and lost events. 113 Do various checks like samples ordering and lost events.
114 114
115-f:: 115-F::
116--fields:: 116--fields::
117 Comma separated list of fields to print. Options are: 117 Comma separated list of fields to print. Options are:
118 comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, 118 comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
119 srcline, period, iregs, flags. 119 srcline, period, iregs, brstack, brstacksym, flags.
120 Field list can be prepended with the type, trace, sw or hw, 120 Field list can be prepended with the type, trace, sw or hw,
121 to indicate to which event type the field list applies. 121 to indicate to which event type the field list applies.
122 e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace 122 e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace
@@ -175,6 +175,16 @@ OPTIONS
175 Finally, a user may not set fields to none for all event types. 175 Finally, a user may not set fields to none for all event types.
176 i.e., -f "" is not allowed. 176 i.e., -f "" is not allowed.
177 177
178 The brstack output includes branch related information with raw addresses using the
179 /v/v/v/v/ syntax in the following order:
180 FROM: branch source instruction
181 TO : branch target instruction
182 M/P/-: M=branch target mispredicted or branch direction was mispredicted, P=target predicted or direction predicted, -=not supported
183 X/- : X=branch inside a transactional region, -=not in transaction region or not supported
184 A/- : A=TSX abort entry, -=not aborted region or not supported
185
186 The brstacksym is identical to brstack, except that the FROM and TO addresses are printed in a symbolic form if possible.
187
178-k:: 188-k::
179--vmlinux=<file>:: 189--vmlinux=<file>::
180 vmlinux pathname 190 vmlinux pathname
@@ -249,6 +259,9 @@ include::itrace.txt[]
249--full-source-path:: 259--full-source-path::
250 Show the full path for source files for srcline output. 260 Show the full path for source files for srcline output.
251 261
262--ns::
263 Use 9 decimal places when displaying time (i.e. show the nanoseconds)
264
252SEE ALSO 265SEE ALSO
253-------- 266--------
254linkperf:perf-record[1], linkperf:perf-script-perl[1], 267linkperf:perf-record[1], linkperf:perf-script-perl[1],
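A sketch of the renamed -F option together with the new fields and --ns (the field selection is arbitrary):

    $ perf script -F comm,tid,time,ip,sym,brstacksym --ns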
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 47469abdcc1c..4e074a660826 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -128,8 +128,9 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m
128 128
129-I msecs:: 129-I msecs::
130--interval-print msecs:: 130--interval-print msecs::
131 Print count deltas every N milliseconds (minimum: 100ms) 131Print count deltas every N milliseconds (minimum: 10ms)
132 example: perf stat -I 1000 -e cycles -a sleep 5 132The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution.
133 example: 'perf stat -I 1000 -e cycles -a sleep 5'
133 134
134--per-socket:: 135--per-socket::
135Aggregate counts per processor socket for system-wide mode measurements. This 136Aggregate counts per processor socket for system-wide mode measurements. This
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index f6a23eb294e7..556cec09bf50 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -160,9 +160,10 @@ Default is to monitor all CPUS.
160-g:: 160-g::
161 Enables call-graph (stack chain/backtrace) recording. 161 Enables call-graph (stack chain/backtrace) recording.
162 162
163--call-graph:: 163--call-graph [mode,type,min[,limit],order[,key][,branch]]::
164 Setup and enable call-graph (stack chain/backtrace) recording, 164 Setup and enable call-graph (stack chain/backtrace) recording,
165 implies -g. 165 implies -g. See `--call-graph` section in perf-record and
166 perf-report man pages for details.
166 167
167--children:: 168--children::
168 Accumulate callchain of children to parent entry so that they can 169 Accumulate callchain of children to parent entry so that they can
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 2b131776363e..864e37597252 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -27,6 +27,14 @@ OPTIONS
27 Setup buildid cache directory. It has higher priority than 27 Setup buildid cache directory. It has higher priority than
28 buildid.dir config file option. 28 buildid.dir config file option.
29 29
30-v::
31--version::
32 Display perf version.
33
34-h::
35--help::
36 Run perf help command.
37
30DESCRIPTION 38DESCRIPTION
31----------- 39-----------
32Performance counters for Linux are a new kernel-based subsystem 40Performance counters for Linux are a new kernel-based subsystem
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index af009bd6e6b7..39c38cb45b00 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -17,6 +17,7 @@ tools/build
17tools/arch/x86/include/asm/atomic.h 17tools/arch/x86/include/asm/atomic.h
18tools/arch/x86/include/asm/rmwcc.h 18tools/arch/x86/include/asm/rmwcc.h
19tools/lib/traceevent 19tools/lib/traceevent
20tools/lib/bpf
20tools/lib/api 21tools/lib/api
21tools/lib/bpf 22tools/lib/bpf
22tools/lib/hweight.c 23tools/lib/hweight.c
@@ -41,6 +42,7 @@ tools/include/asm-generic/bitops.h
41tools/include/linux/atomic.h 42tools/include/linux/atomic.h
42tools/include/linux/bitops.h 43tools/include/linux/bitops.h
43tools/include/linux/compiler.h 44tools/include/linux/compiler.h
45tools/include/linux/filter.h
44tools/include/linux/hash.h 46tools/include/linux/hash.h
45tools/include/linux/kernel.h 47tools/include/linux/kernel.h
46tools/include/linux/list.h 48tools/include/linux/list.h
@@ -49,6 +51,7 @@ tools/include/linux/poison.h
49tools/include/linux/rbtree.h 51tools/include/linux/rbtree.h
50tools/include/linux/rbtree_augmented.h 52tools/include/linux/rbtree_augmented.h
51tools/include/linux/types.h 53tools/include/linux/types.h
54tools/include/linux/err.h
52include/asm-generic/bitops/arch_hweight.h 55include/asm-generic/bitops/arch_hweight.h
53include/asm-generic/bitops/const_hweight.h 56include/asm-generic/bitops/const_hweight.h
54include/asm-generic/bitops/fls64.h 57include/asm-generic/bitops/fls64.h
@@ -67,6 +70,8 @@ arch/*/lib/memset*.S
67include/linux/poison.h 70include/linux/poison.h
68include/linux/hw_breakpoint.h 71include/linux/hw_breakpoint.h
69include/uapi/linux/perf_event.h 72include/uapi/linux/perf_event.h
73include/uapi/linux/bpf.h
74include/uapi/linux/bpf_common.h
70include/uapi/linux/const.h 75include/uapi/linux/const.h
71include/uapi/linux/swab.h 76include/uapi/linux/swab.h
72include/uapi/linux/hw_breakpoint.h 77include/uapi/linux/hw_breakpoint.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index d9863cb96f59..0d19d5447d6c 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -75,6 +75,8 @@ include config/utilities.mak
75# Define NO_LZMA if you do not want to support compressed (xz) kernel modules 75# Define NO_LZMA if you do not want to support compressed (xz) kernel modules
76# 76#
77# Define NO_AUXTRACE if you do not want AUX area tracing support 77# Define NO_AUXTRACE if you do not want AUX area tracing support
78#
79# Define NO_LIBBPF if you do not want BPF support
78 80
79# As per kernel Makefile, avoid funny character set dependencies 81# As per kernel Makefile, avoid funny character set dependencies
80unexport LC_ALL 82unexport LC_ALL
@@ -145,6 +147,7 @@ AWK = awk
145 147
146LIB_DIR = $(srctree)/tools/lib/api/ 148LIB_DIR = $(srctree)/tools/lib/api/
147TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/ 149TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
150BPF_DIR = $(srctree)/tools/lib/bpf/
148 151
149# include config/Makefile by default and rule out 152# include config/Makefile by default and rule out
150# non-config cases 153# non-config cases
@@ -180,6 +183,7 @@ strip-libs = $(filter-out -l%,$(1))
180 183
181ifneq ($(OUTPUT),) 184ifneq ($(OUTPUT),)
182 TE_PATH=$(OUTPUT) 185 TE_PATH=$(OUTPUT)
186 BPF_PATH=$(OUTPUT)
183ifneq ($(subdir),) 187ifneq ($(subdir),)
184 LIB_PATH=$(OUTPUT)/../lib/api/ 188 LIB_PATH=$(OUTPUT)/../lib/api/
185else 189else
@@ -188,6 +192,7 @@ endif
188else 192else
189 TE_PATH=$(TRACE_EVENT_DIR) 193 TE_PATH=$(TRACE_EVENT_DIR)
190 LIB_PATH=$(LIB_DIR) 194 LIB_PATH=$(LIB_DIR)
195 BPF_PATH=$(BPF_DIR)
191endif 196endif
192 197
193LIBTRACEEVENT = $(TE_PATH)libtraceevent.a 198LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
@@ -199,6 +204,8 @@ LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS = -Xlinker --dynamic-list=$(LIBTRACEEVENT_DYN
199LIBAPI = $(LIB_PATH)libapi.a 204LIBAPI = $(LIB_PATH)libapi.a
200export LIBAPI 205export LIBAPI
201 206
207LIBBPF = $(BPF_PATH)libbpf.a
208
202# python extension build directories 209# python extension build directories
203PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ 210PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
204PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ 211PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
@@ -251,6 +258,9 @@ export PERL_PATH
251LIB_FILE=$(OUTPUT)libperf.a 258LIB_FILE=$(OUTPUT)libperf.a
252 259
253PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT) 260PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT)
261ifndef NO_LIBBPF
262 PERFLIBS += $(LIBBPF)
263endif
254 264
255# We choose to avoid "if .. else if .. else .. endif endif" 265# We choose to avoid "if .. else if .. else .. endif endif"
256# because maintaining the nesting to match is a pain. If 266# because maintaining the nesting to match is a pain. If
@@ -297,16 +307,16 @@ strip: $(PROGRAMS) $(OUTPUT)perf
297PERF_IN := $(OUTPUT)perf-in.o 307PERF_IN := $(OUTPUT)perf-in.o
298 308
299export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK 309export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
300build := -f $(srctree)/tools/build/Makefile.build dir=. obj 310include $(srctree)/tools/build/Makefile.include
301 311
302$(PERF_IN): $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h FORCE 312$(PERF_IN): prepare FORCE
303 $(Q)$(MAKE) $(build)=perf 313 $(Q)$(MAKE) $(build)=perf
304 314
305$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST) 315$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
306 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \ 316 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
307 $(PERF_IN) $(LIBS) -o $@ 317 $(PERF_IN) $(LIBS) -o $@
308 318
309$(GTK_IN): FORCE 319$(GTK_IN): fixdep FORCE
310 $(Q)$(MAKE) $(build)=gtk 320 $(Q)$(MAKE) $(build)=gtk
311 321
312$(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS) 322$(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
@@ -349,27 +359,27 @@ endif
349__build-dir = $(subst $(OUTPUT),,$(dir $@)) 359__build-dir = $(subst $(OUTPUT),,$(dir $@))
350build-dir = $(if $(__build-dir),$(__build-dir),.) 360build-dir = $(if $(__build-dir),$(__build-dir),.)
351 361
352single_dep: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h 362prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep
353 363
354$(OUTPUT)%.o: %.c single_dep FORCE 364$(OUTPUT)%.o: %.c prepare FORCE
355 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 365 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
356 366
357$(OUTPUT)%.i: %.c single_dep FORCE 367$(OUTPUT)%.i: %.c prepare FORCE
358 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 368 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
359 369
360$(OUTPUT)%.s: %.c single_dep FORCE 370$(OUTPUT)%.s: %.c prepare FORCE
361 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 371 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
362 372
363$(OUTPUT)%-bison.o: %.c single_dep FORCE 373$(OUTPUT)%-bison.o: %.c prepare FORCE
364 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 374 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
365 375
366$(OUTPUT)%-flex.o: %.c single_dep FORCE 376$(OUTPUT)%-flex.o: %.c prepare FORCE
367 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 377 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
368 378
369$(OUTPUT)%.o: %.S single_dep FORCE 379$(OUTPUT)%.o: %.S prepare FORCE
370 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 380 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
371 381
372$(OUTPUT)%.i: %.S single_dep FORCE 382$(OUTPUT)%.i: %.S prepare FORCE
373 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 383 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
374 384
375$(OUTPUT)perf-%: %.o $(PERFLIBS) 385$(OUTPUT)perf-%: %.o $(PERFLIBS)
@@ -389,7 +399,7 @@ $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
389 399
390LIBPERF_IN := $(OUTPUT)libperf-in.o 400LIBPERF_IN := $(OUTPUT)libperf-in.o
391 401
392$(LIBPERF_IN): FORCE 402$(LIBPERF_IN): fixdep FORCE
393 $(Q)$(MAKE) $(build)=libperf 403 $(Q)$(MAKE) $(build)=libperf
394 404
395$(LIB_FILE): $(LIBPERF_IN) 405$(LIB_FILE): $(LIBPERF_IN)
@@ -397,10 +407,10 @@ $(LIB_FILE): $(LIBPERF_IN)
397 407
398LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 408LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
399 409
400$(LIBTRACEEVENT): FORCE 410$(LIBTRACEEVENT): fixdep FORCE
401 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a 411 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
402 412
403libtraceevent_plugins: FORCE 413libtraceevent_plugins: fixdep FORCE
404 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins 414 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
405 415
406$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins 416$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
@@ -413,13 +423,20 @@ $(LIBTRACEEVENT)-clean:
413install-traceevent-plugins: $(LIBTRACEEVENT) 423install-traceevent-plugins: $(LIBTRACEEVENT)
414 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins 424 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
415 425
416$(LIBAPI): FORCE 426$(LIBAPI): fixdep FORCE
417 $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a 427 $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
418 428
419$(LIBAPI)-clean: 429$(LIBAPI)-clean:
420 $(call QUIET_CLEAN, libapi) 430 $(call QUIET_CLEAN, libapi)
421 $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null 431 $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
422 432
433$(LIBBPF): fixdep FORCE
434 $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a
435
436$(LIBBPF)-clean:
437 $(call QUIET_CLEAN, libbpf)
438 $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
439
423help: 440help:
424 @echo 'Perf make targets:' 441 @echo 'Perf make targets:'
425 @echo ' doc - make *all* documentation (see below)' 442 @echo ' doc - make *all* documentation (see below)'
@@ -459,7 +476,7 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
459$(DOC_TARGETS): 476$(DOC_TARGETS):
460 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all) 477 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
461 478
462TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol 479TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol ../include ../lib/bpf
463TAG_FILES= ../../include/uapi/linux/perf_event.h 480TAG_FILES= ../../include/uapi/linux/perf_event.h
464 481
465TAGS: 482TAGS:
@@ -567,7 +584,7 @@ config-clean:
567 $(call QUIET_CLEAN, config) 584 $(call QUIET_CLEAN, config)
568 $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null 585 $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
569 586
570clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean config-clean 587clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean config-clean
571 $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS) 588 $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
572 $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete 589 $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
573 $(Q)$(RM) $(OUTPUT).config-detected 590 $(Q)$(RM) $(OUTPUT).config-detected
@@ -591,6 +608,6 @@ FORCE:
591 608
592.PHONY: all install clean config-clean strip install-gtk 609.PHONY: all install clean config-clean strip install-gtk
593.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell 610.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
594.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE single_dep 611.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
595.PHONY: libtraceevent_plugins 612.PHONY: libtraceevent_plugins
596 613
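Assuming the usual kernel tools build flow, the new knob can be used to build perf without BPF support, e.g.:

    $ make -C tools/perf NO_LIBBPF=1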
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index b00dfd92ea73..e83c8ce24303 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -128,9 +128,8 @@ static const char *normalize_arch(char *arch)
128 return arch; 128 return arch;
129} 129}
130 130
131static int perf_session_env__lookup_binutils_path(struct perf_env *env, 131static int perf_env__lookup_binutils_path(struct perf_env *env,
132 const char *name, 132 const char *name, const char **path)
133 const char **path)
134{ 133{
135 int idx; 134 int idx;
136 const char *arch, *cross_env; 135 const char *arch, *cross_env;
@@ -206,7 +205,7 @@ out_error:
206 return -1; 205 return -1;
207} 206}
208 207
209int perf_session_env__lookup_objdump(struct perf_env *env) 208int perf_env__lookup_objdump(struct perf_env *env)
210{ 209{
211 /* 210 /*
212 * For live mode, env->arch will be NULL and we can use 211 * For live mode, env->arch will be NULL and we can use
@@ -215,6 +214,5 @@ int perf_session_env__lookup_objdump(struct perf_env *env)
215 if (env->arch == NULL) 214 if (env->arch == NULL)
216 return 0; 215 return 0;
217 216
218 return perf_session_env__lookup_binutils_path(env, "objdump", 217 return perf_env__lookup_binutils_path(env, "objdump", &objdump_path);
219 &objdump_path);
220} 218}
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 20176df69fc8..7529cfb143ce 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -1,10 +1,10 @@
1#ifndef ARCH_PERF_COMMON_H 1#ifndef ARCH_PERF_COMMON_H
2#define ARCH_PERF_COMMON_H 2#define ARCH_PERF_COMMON_H
3 3
4#include "../util/session.h" 4#include "../util/env.h"
5 5
6extern const char *objdump_path; 6extern const char *objdump_path;
7 7
8int perf_session_env__lookup_objdump(struct perf_env *env); 8int perf_env__lookup_objdump(struct perf_env *env);
9 9
10#endif /* ARCH_PERF_COMMON_H */ 10#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index 41bf61da476a..db52fa22d3a1 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -1,2 +1,2 @@
1libperf-y += util/ 1libperf-y += util/
2libperf-$(CONFIG_DWARF_UNWIND) += tests/ 2libperf-y += tests/
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 21322e0385b8..09ba923debe8 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -2,3 +2,4 @@ ifndef NO_DWARF
2PERF_HAVE_DWARF_REGS := 1 2PERF_HAVE_DWARF_REGS := 1
3endif 3endif
4HAVE_KVM_STAT_SUPPORT := 1 4HAVE_KVM_STAT_SUPPORT := 1
5PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
new file mode 100644
index 000000000000..7ed00f4b0908
--- /dev/null
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -0,0 +1,19 @@
1#ifndef ARCH_TESTS_H
2#define ARCH_TESTS_H
3
4/* Tests */
5int test__rdpmc(void);
6int test__perf_time_to_tsc(void);
7int test__insn_x86(void);
8int test__intel_cqm_count_nmi_context(void);
9
10#ifdef HAVE_DWARF_UNWIND_SUPPORT
11struct thread;
12struct perf_sample;
13int test__arch_unwind_sample(struct perf_sample *sample,
14 struct thread *thread);
15#endif
16
17extern struct test arch_tests[];
18
19#endif
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index b30eff9bcc83..cbb7e978166b 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -1,2 +1,8 @@
1libperf-y += regs_load.o 1libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
2libperf-y += dwarf-unwind.o 2libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
3
4libperf-y += arch-tests.o
5libperf-y += rdpmc.o
6libperf-y += perf-time-to-tsc.o
7libperf-$(CONFIG_AUXTRACE) += insn-x86.o
8libperf-y += intel-cqm.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
new file mode 100644
index 000000000000..2218cb64f840
--- /dev/null
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -0,0 +1,34 @@
1#include <string.h>
2#include "tests/tests.h"
3#include "arch-tests.h"
4
5struct test arch_tests[] = {
6 {
7 .desc = "x86 rdpmc test",
8 .func = test__rdpmc,
9 },
10 {
11 .desc = "Test converting perf time to TSC",
12 .func = test__perf_time_to_tsc,
13 },
14#ifdef HAVE_DWARF_UNWIND_SUPPORT
15 {
16 .desc = "Test dwarf unwind",
17 .func = test__dwarf_unwind,
18 },
19#endif
20#ifdef HAVE_AUXTRACE_SUPPORT
21 {
22 .desc = "Test x86 instruction decoder - new instructions",
23 .func = test__insn_x86,
24 },
25#endif
26 {
27 .desc = "Test intel cqm nmi context read",
28 .func = test__intel_cqm_count_nmi_context,
29 },
30 {
31 .func = NULL,
32 },
33
34};
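Once built, the x86 tests registered in arch_tests[] should be selectable through 'perf test' like the generic ones; a hedged example, assuming 'perf test' accepts a test-name substring as it does for the generic tests:

    $ perf test -v rdpmc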
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index d8bbf7ad1681..7f209ce827bf 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -5,6 +5,7 @@
5#include "event.h" 5#include "event.h"
6#include "debug.h" 6#include "debug.h"
7#include "tests/tests.h" 7#include "tests/tests.h"
8#include "arch-tests.h"
8 9
9#define STACK_SIZE 8192 10#define STACK_SIZE 8192
10 11
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk b/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
new file mode 100644
index 000000000000..a21454835cd4
--- /dev/null
+++ b/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
@@ -0,0 +1,75 @@
1#!/bin/awk -f
2# gen-insn-x86-dat.awk: script to convert data for the insn-x86 test
3# Copyright (c) 2015, Intel Corporation.
4#
5# This program is free software; you can redistribute it and/or modify it
6# under the terms and conditions of the GNU General Public License,
7# version 2, as published by the Free Software Foundation.
8#
9# This program is distributed in the hope it will be useful, but WITHOUT
10# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12# more details.
13
14BEGIN {
15 print "/*"
16 print " * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk"
17 print " * from insn-x86-dat-src.c for inclusion by insn-x86.c"
18 print " * Do not change this code."
19 print "*/\n"
20 op = ""
21 branch = ""
22 rel = 0
23 going = 0
24}
25
26/ Start here / {
27 going = 1
28}
29
30/ Stop here / {
31 going = 0
32}
33
34/^\s*[0-9a-fA-F]+\:/ {
35 if (going) {
36 colon_pos = index($0, ":")
37 useful_line = substr($0, colon_pos + 1)
38 first_pos = match(useful_line, "[0-9a-fA-F]")
39 useful_line = substr(useful_line, first_pos)
40 gsub("\t", "\\t", useful_line)
41 printf "{{"
42 len = 0
43 for (i = 2; i <= NF; i++) {
44 if (match($i, "^[0-9a-fA-F][0-9a-fA-F]$")) {
45 printf "0x%s, ", $i
46 len += 1
47 } else {
48 break
49 }
50 }
51 printf "}, %d, %s, \"%s\", \"%s\",", len, rel, op, branch
52 printf "\n\"%s\",},\n", useful_line
53 op = ""
54 branch = ""
55 rel = 0
56 }
57}
58
59/ Expecting: / {
60 expecting_str = " Expecting: "
61 expecting_len = length(expecting_str)
62 expecting_pos = index($0, expecting_str)
63 useful_line = substr($0, expecting_pos + expecting_len)
64 for (i = 1; i <= NF; i++) {
65 if ($i == "Expecting:") {
66 i++
67 op = $i
68 i++
69 branch = $i
70 i++
71 rel = $i
72 break
73 }
74 }
75}
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
new file mode 100755
index 000000000000..2d4ef94cff98
--- /dev/null
+++ b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
@@ -0,0 +1,43 @@
1#!/bin/sh
2# gen-insn-x86-dat: generate data for the insn-x86 test
3# Copyright (c) 2015, Intel Corporation.
4#
5# This program is free software; you can redistribute it and/or modify it
6# under the terms and conditions of the GNU General Public License,
7# version 2, as published by the Free Software Foundation.
8#
9# This program is distributed in the hope it will be useful, but WITHOUT
10# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12# more details.
13
14set -e
15
16if [ "$(uname -m)" != "x86_64" ]; then
17 echo "ERROR: This script only works on x86_64"
18 exit 1
19fi
20
21cd $(dirname $0)
22
23trap 'echo "Might need a more recent version of binutils"' EXIT
24
25echo "Compiling insn-x86-dat-src.c to 64-bit object"
26
27gcc -g -c insn-x86-dat-src.c
28
29objdump -dSw insn-x86-dat-src.o | awk -f gen-insn-x86-dat.awk > insn-x86-dat-64.c
30
31rm -f insn-x86-dat-src.o
32
33echo "Compiling insn-x86-dat-src.c to 32-bit object"
34
35gcc -g -c -m32 insn-x86-dat-src.c
36
37objdump -dSw insn-x86-dat-src.o | awk -f gen-insn-x86-dat.awk > insn-x86-dat-32.c
38
39rm -f insn-x86-dat-src.o
40
41trap - EXIT
42
43echo "Done (use git diff to see the changes)"
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
new file mode 100644
index 000000000000..3b491cfe204e
--- /dev/null
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
@@ -0,0 +1,658 @@
1/*
2 * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk
3 * from insn-x86-dat-src.c for inclusion by insn-x86.c
4 * Do not change this code.
5*/
6
7{{0x0f, 0x31, }, 2, 0, "", "",
8"0f 31 \trdtsc ",},
9{{0xf3, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
10"f3 0f 1b 00 \tbndmk (%eax),%bnd0",},
11{{0xf3, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
12"f3 0f 1b 05 78 56 34 12 \tbndmk 0x12345678,%bnd0",},
13{{0xf3, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
14"f3 0f 1b 18 \tbndmk (%eax),%bnd3",},
15{{0xf3, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
16"f3 0f 1b 04 01 \tbndmk (%ecx,%eax,1),%bnd0",},
17{{0xf3, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
18"f3 0f 1b 04 05 78 56 34 12 \tbndmk 0x12345678(,%eax,1),%bnd0",},
19{{0xf3, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
20"f3 0f 1b 04 08 \tbndmk (%eax,%ecx,1),%bnd0",},
21{{0xf3, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
22"f3 0f 1b 04 c8 \tbndmk (%eax,%ecx,8),%bnd0",},
23{{0xf3, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
24"f3 0f 1b 40 12 \tbndmk 0x12(%eax),%bnd0",},
25{{0xf3, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
26"f3 0f 1b 45 12 \tbndmk 0x12(%ebp),%bnd0",},
27{{0xf3, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
28"f3 0f 1b 44 01 12 \tbndmk 0x12(%ecx,%eax,1),%bnd0",},
29{{0xf3, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
30"f3 0f 1b 44 05 12 \tbndmk 0x12(%ebp,%eax,1),%bnd0",},
31{{0xf3, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
32"f3 0f 1b 44 08 12 \tbndmk 0x12(%eax,%ecx,1),%bnd0",},
33{{0xf3, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
34"f3 0f 1b 44 c8 12 \tbndmk 0x12(%eax,%ecx,8),%bnd0",},
35{{0xf3, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
36"f3 0f 1b 80 78 56 34 12 \tbndmk 0x12345678(%eax),%bnd0",},
37{{0xf3, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
38"f3 0f 1b 85 78 56 34 12 \tbndmk 0x12345678(%ebp),%bnd0",},
39{{0xf3, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
40"f3 0f 1b 84 01 78 56 34 12 \tbndmk 0x12345678(%ecx,%eax,1),%bnd0",},
41{{0xf3, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
42"f3 0f 1b 84 05 78 56 34 12 \tbndmk 0x12345678(%ebp,%eax,1),%bnd0",},
43{{0xf3, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
44"f3 0f 1b 84 08 78 56 34 12 \tbndmk 0x12345678(%eax,%ecx,1),%bnd0",},
45{{0xf3, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
46"f3 0f 1b 84 c8 78 56 34 12 \tbndmk 0x12345678(%eax,%ecx,8),%bnd0",},
47{{0xf3, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
48"f3 0f 1a 00 \tbndcl (%eax),%bnd0",},
49{{0xf3, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
50"f3 0f 1a 05 78 56 34 12 \tbndcl 0x12345678,%bnd0",},
51{{0xf3, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
52"f3 0f 1a 18 \tbndcl (%eax),%bnd3",},
53{{0xf3, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
54"f3 0f 1a 04 01 \tbndcl (%ecx,%eax,1),%bnd0",},
55{{0xf3, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
56"f3 0f 1a 04 05 78 56 34 12 \tbndcl 0x12345678(,%eax,1),%bnd0",},
57{{0xf3, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
58"f3 0f 1a 04 08 \tbndcl (%eax,%ecx,1),%bnd0",},
59{{0xf3, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
60"f3 0f 1a 04 c8 \tbndcl (%eax,%ecx,8),%bnd0",},
61{{0xf3, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
62"f3 0f 1a 40 12 \tbndcl 0x12(%eax),%bnd0",},
63{{0xf3, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
64"f3 0f 1a 45 12 \tbndcl 0x12(%ebp),%bnd0",},
65{{0xf3, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
66"f3 0f 1a 44 01 12 \tbndcl 0x12(%ecx,%eax,1),%bnd0",},
67{{0xf3, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
68"f3 0f 1a 44 05 12 \tbndcl 0x12(%ebp,%eax,1),%bnd0",},
69{{0xf3, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
70"f3 0f 1a 44 08 12 \tbndcl 0x12(%eax,%ecx,1),%bnd0",},
71{{0xf3, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
72"f3 0f 1a 44 c8 12 \tbndcl 0x12(%eax,%ecx,8),%bnd0",},
73{{0xf3, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
74"f3 0f 1a 80 78 56 34 12 \tbndcl 0x12345678(%eax),%bnd0",},
75{{0xf3, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
76"f3 0f 1a 85 78 56 34 12 \tbndcl 0x12345678(%ebp),%bnd0",},
77{{0xf3, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
78"f3 0f 1a 84 01 78 56 34 12 \tbndcl 0x12345678(%ecx,%eax,1),%bnd0",},
79{{0xf3, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
80"f3 0f 1a 84 05 78 56 34 12 \tbndcl 0x12345678(%ebp,%eax,1),%bnd0",},
81{{0xf3, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
82"f3 0f 1a 84 08 78 56 34 12 \tbndcl 0x12345678(%eax,%ecx,1),%bnd0",},
83{{0xf3, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
84"f3 0f 1a 84 c8 78 56 34 12 \tbndcl 0x12345678(%eax,%ecx,8),%bnd0",},
85{{0xf3, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
86"f3 0f 1a c0 \tbndcl %eax,%bnd0",},
87{{0xf2, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
88"f2 0f 1a 00 \tbndcu (%eax),%bnd0",},
89{{0xf2, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
90"f2 0f 1a 05 78 56 34 12 \tbndcu 0x12345678,%bnd0",},
91{{0xf2, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
92"f2 0f 1a 18 \tbndcu (%eax),%bnd3",},
93{{0xf2, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
94"f2 0f 1a 04 01 \tbndcu (%ecx,%eax,1),%bnd0",},
95{{0xf2, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
96"f2 0f 1a 04 05 78 56 34 12 \tbndcu 0x12345678(,%eax,1),%bnd0",},
97{{0xf2, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
98"f2 0f 1a 04 08 \tbndcu (%eax,%ecx,1),%bnd0",},
99{{0xf2, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
100"f2 0f 1a 04 c8 \tbndcu (%eax,%ecx,8),%bnd0",},
101{{0xf2, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
102"f2 0f 1a 40 12 \tbndcu 0x12(%eax),%bnd0",},
103{{0xf2, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
104"f2 0f 1a 45 12 \tbndcu 0x12(%ebp),%bnd0",},
105{{0xf2, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
106"f2 0f 1a 44 01 12 \tbndcu 0x12(%ecx,%eax,1),%bnd0",},
107{{0xf2, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
108"f2 0f 1a 44 05 12 \tbndcu 0x12(%ebp,%eax,1),%bnd0",},
109{{0xf2, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
110"f2 0f 1a 44 08 12 \tbndcu 0x12(%eax,%ecx,1),%bnd0",},
111{{0xf2, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
112"f2 0f 1a 44 c8 12 \tbndcu 0x12(%eax,%ecx,8),%bnd0",},
113{{0xf2, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
114"f2 0f 1a 80 78 56 34 12 \tbndcu 0x12345678(%eax),%bnd0",},
115{{0xf2, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
116"f2 0f 1a 85 78 56 34 12 \tbndcu 0x12345678(%ebp),%bnd0",},
117{{0xf2, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
118"f2 0f 1a 84 01 78 56 34 12 \tbndcu 0x12345678(%ecx,%eax,1),%bnd0",},
119{{0xf2, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
120"f2 0f 1a 84 05 78 56 34 12 \tbndcu 0x12345678(%ebp,%eax,1),%bnd0",},
121{{0xf2, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
122"f2 0f 1a 84 08 78 56 34 12 \tbndcu 0x12345678(%eax,%ecx,1),%bnd0",},
123{{0xf2, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
124"f2 0f 1a 84 c8 78 56 34 12 \tbndcu 0x12345678(%eax,%ecx,8),%bnd0",},
125{{0xf2, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
126"f2 0f 1a c0 \tbndcu %eax,%bnd0",},
127{{0xf2, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
128"f2 0f 1b 00 \tbndcn (%eax),%bnd0",},
129{{0xf2, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
130"f2 0f 1b 05 78 56 34 12 \tbndcn 0x12345678,%bnd0",},
131{{0xf2, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
132"f2 0f 1b 18 \tbndcn (%eax),%bnd3",},
133{{0xf2, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
134"f2 0f 1b 04 01 \tbndcn (%ecx,%eax,1),%bnd0",},
135{{0xf2, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
136"f2 0f 1b 04 05 78 56 34 12 \tbndcn 0x12345678(,%eax,1),%bnd0",},
137{{0xf2, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
138"f2 0f 1b 04 08 \tbndcn (%eax,%ecx,1),%bnd0",},
139{{0xf2, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
140"f2 0f 1b 04 c8 \tbndcn (%eax,%ecx,8),%bnd0",},
141{{0xf2, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
142"f2 0f 1b 40 12 \tbndcn 0x12(%eax),%bnd0",},
143{{0xf2, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
144"f2 0f 1b 45 12 \tbndcn 0x12(%ebp),%bnd0",},
145{{0xf2, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
146"f2 0f 1b 44 01 12 \tbndcn 0x12(%ecx,%eax,1),%bnd0",},
147{{0xf2, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
148"f2 0f 1b 44 05 12 \tbndcn 0x12(%ebp,%eax,1),%bnd0",},
149{{0xf2, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
150"f2 0f 1b 44 08 12 \tbndcn 0x12(%eax,%ecx,1),%bnd0",},
151{{0xf2, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
152"f2 0f 1b 44 c8 12 \tbndcn 0x12(%eax,%ecx,8),%bnd0",},
153{{0xf2, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
154"f2 0f 1b 80 78 56 34 12 \tbndcn 0x12345678(%eax),%bnd0",},
155{{0xf2, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
156"f2 0f 1b 85 78 56 34 12 \tbndcn 0x12345678(%ebp),%bnd0",},
157{{0xf2, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
158"f2 0f 1b 84 01 78 56 34 12 \tbndcn 0x12345678(%ecx,%eax,1),%bnd0",},
159{{0xf2, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
160"f2 0f 1b 84 05 78 56 34 12 \tbndcn 0x12345678(%ebp,%eax,1),%bnd0",},
161{{0xf2, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
162"f2 0f 1b 84 08 78 56 34 12 \tbndcn 0x12345678(%eax,%ecx,1),%bnd0",},
163{{0xf2, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
164"f2 0f 1b 84 c8 78 56 34 12 \tbndcn 0x12345678(%eax,%ecx,8),%bnd0",},
165{{0xf2, 0x0f, 0x1b, 0xc0, }, 4, 0, "", "",
166"f2 0f 1b c0 \tbndcn %eax,%bnd0",},
167{{0x66, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
168"66 0f 1a 00 \tbndmov (%eax),%bnd0",},
169{{0x66, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
170"66 0f 1a 05 78 56 34 12 \tbndmov 0x12345678,%bnd0",},
171{{0x66, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
172"66 0f 1a 18 \tbndmov (%eax),%bnd3",},
173{{0x66, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
174"66 0f 1a 04 01 \tbndmov (%ecx,%eax,1),%bnd0",},
175{{0x66, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
176"66 0f 1a 04 05 78 56 34 12 \tbndmov 0x12345678(,%eax,1),%bnd0",},
177{{0x66, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
178"66 0f 1a 04 08 \tbndmov (%eax,%ecx,1),%bnd0",},
179{{0x66, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
180"66 0f 1a 04 c8 \tbndmov (%eax,%ecx,8),%bnd0",},
181{{0x66, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
182"66 0f 1a 40 12 \tbndmov 0x12(%eax),%bnd0",},
183{{0x66, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
184"66 0f 1a 45 12 \tbndmov 0x12(%ebp),%bnd0",},
185{{0x66, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
186"66 0f 1a 44 01 12 \tbndmov 0x12(%ecx,%eax,1),%bnd0",},
187{{0x66, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
188"66 0f 1a 44 05 12 \tbndmov 0x12(%ebp,%eax,1),%bnd0",},
189{{0x66, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
190"66 0f 1a 44 08 12 \tbndmov 0x12(%eax,%ecx,1),%bnd0",},
191{{0x66, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
192"66 0f 1a 44 c8 12 \tbndmov 0x12(%eax,%ecx,8),%bnd0",},
193{{0x66, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
194"66 0f 1a 80 78 56 34 12 \tbndmov 0x12345678(%eax),%bnd0",},
195{{0x66, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
196"66 0f 1a 85 78 56 34 12 \tbndmov 0x12345678(%ebp),%bnd0",},
197{{0x66, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
198"66 0f 1a 84 01 78 56 34 12 \tbndmov 0x12345678(%ecx,%eax,1),%bnd0",},
199{{0x66, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
200"66 0f 1a 84 05 78 56 34 12 \tbndmov 0x12345678(%ebp,%eax,1),%bnd0",},
201{{0x66, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
202"66 0f 1a 84 08 78 56 34 12 \tbndmov 0x12345678(%eax,%ecx,1),%bnd0",},
203{{0x66, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
204"66 0f 1a 84 c8 78 56 34 12 \tbndmov 0x12345678(%eax,%ecx,8),%bnd0",},
205{{0x66, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
206"66 0f 1b 00 \tbndmov %bnd0,(%eax)",},
207{{0x66, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
208"66 0f 1b 05 78 56 34 12 \tbndmov %bnd0,0x12345678",},
209{{0x66, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
210"66 0f 1b 18 \tbndmov %bnd3,(%eax)",},
211{{0x66, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
212"66 0f 1b 04 01 \tbndmov %bnd0,(%ecx,%eax,1)",},
213{{0x66, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
214"66 0f 1b 04 05 78 56 34 12 \tbndmov %bnd0,0x12345678(,%eax,1)",},
215{{0x66, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
216"66 0f 1b 04 08 \tbndmov %bnd0,(%eax,%ecx,1)",},
217{{0x66, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
218"66 0f 1b 04 c8 \tbndmov %bnd0,(%eax,%ecx,8)",},
219{{0x66, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
220"66 0f 1b 40 12 \tbndmov %bnd0,0x12(%eax)",},
221{{0x66, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
222"66 0f 1b 45 12 \tbndmov %bnd0,0x12(%ebp)",},
223{{0x66, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
224"66 0f 1b 44 01 12 \tbndmov %bnd0,0x12(%ecx,%eax,1)",},
225{{0x66, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
226"66 0f 1b 44 05 12 \tbndmov %bnd0,0x12(%ebp,%eax,1)",},
227{{0x66, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
228"66 0f 1b 44 08 12 \tbndmov %bnd0,0x12(%eax,%ecx,1)",},
229{{0x66, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
230"66 0f 1b 44 c8 12 \tbndmov %bnd0,0x12(%eax,%ecx,8)",},
231{{0x66, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
232"66 0f 1b 80 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax)",},
233{{0x66, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
234"66 0f 1b 85 78 56 34 12 \tbndmov %bnd0,0x12345678(%ebp)",},
235{{0x66, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
236"66 0f 1b 84 01 78 56 34 12 \tbndmov %bnd0,0x12345678(%ecx,%eax,1)",},
237{{0x66, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
238"66 0f 1b 84 05 78 56 34 12 \tbndmov %bnd0,0x12345678(%ebp,%eax,1)",},
239{{0x66, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
240"66 0f 1b 84 08 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax,%ecx,1)",},
241{{0x66, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
242"66 0f 1b 84 c8 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax,%ecx,8)",},
243{{0x66, 0x0f, 0x1a, 0xc8, }, 4, 0, "", "",
244"66 0f 1a c8 \tbndmov %bnd0,%bnd1",},
245{{0x66, 0x0f, 0x1a, 0xc1, }, 4, 0, "", "",
246"66 0f 1a c1 \tbndmov %bnd1,%bnd0",},
247{{0x0f, 0x1a, 0x00, }, 3, 0, "", "",
248"0f 1a 00 \tbndldx (%eax),%bnd0",},
249{{0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
250"0f 1a 05 78 56 34 12 \tbndldx 0x12345678,%bnd0",},
251{{0x0f, 0x1a, 0x18, }, 3, 0, "", "",
252"0f 1a 18 \tbndldx (%eax),%bnd3",},
253{{0x0f, 0x1a, 0x04, 0x01, }, 4, 0, "", "",
254"0f 1a 04 01 \tbndldx (%ecx,%eax,1),%bnd0",},
255{{0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
256"0f 1a 04 05 78 56 34 12 \tbndldx 0x12345678(,%eax,1),%bnd0",},
257{{0x0f, 0x1a, 0x04, 0x08, }, 4, 0, "", "",
258"0f 1a 04 08 \tbndldx (%eax,%ecx,1),%bnd0",},
259{{0x0f, 0x1a, 0x40, 0x12, }, 4, 0, "", "",
260"0f 1a 40 12 \tbndldx 0x12(%eax),%bnd0",},
261{{0x0f, 0x1a, 0x45, 0x12, }, 4, 0, "", "",
262"0f 1a 45 12 \tbndldx 0x12(%ebp),%bnd0",},
263{{0x0f, 0x1a, 0x44, 0x01, 0x12, }, 5, 0, "", "",
264"0f 1a 44 01 12 \tbndldx 0x12(%ecx,%eax,1),%bnd0",},
265{{0x0f, 0x1a, 0x44, 0x05, 0x12, }, 5, 0, "", "",
266"0f 1a 44 05 12 \tbndldx 0x12(%ebp,%eax,1),%bnd0",},
267{{0x0f, 0x1a, 0x44, 0x08, 0x12, }, 5, 0, "", "",
268"0f 1a 44 08 12 \tbndldx 0x12(%eax,%ecx,1),%bnd0",},
269{{0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
270"0f 1a 80 78 56 34 12 \tbndldx 0x12345678(%eax),%bnd0",},
271{{0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
272"0f 1a 85 78 56 34 12 \tbndldx 0x12345678(%ebp),%bnd0",},
273{{0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
274"0f 1a 84 01 78 56 34 12 \tbndldx 0x12345678(%ecx,%eax,1),%bnd0",},
275{{0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
276"0f 1a 84 05 78 56 34 12 \tbndldx 0x12345678(%ebp,%eax,1),%bnd0",},
277{{0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
278"0f 1a 84 08 78 56 34 12 \tbndldx 0x12345678(%eax,%ecx,1),%bnd0",},
279{{0x0f, 0x1b, 0x00, }, 3, 0, "", "",
280"0f 1b 00 \tbndstx %bnd0,(%eax)",},
281{{0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
282"0f 1b 05 78 56 34 12 \tbndstx %bnd0,0x12345678",},
283{{0x0f, 0x1b, 0x18, }, 3, 0, "", "",
284"0f 1b 18 \tbndstx %bnd3,(%eax)",},
285{{0x0f, 0x1b, 0x04, 0x01, }, 4, 0, "", "",
286"0f 1b 04 01 \tbndstx %bnd0,(%ecx,%eax,1)",},
287{{0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
288"0f 1b 04 05 78 56 34 12 \tbndstx %bnd0,0x12345678(,%eax,1)",},
289{{0x0f, 0x1b, 0x04, 0x08, }, 4, 0, "", "",
290"0f 1b 04 08 \tbndstx %bnd0,(%eax,%ecx,1)",},
291{{0x0f, 0x1b, 0x40, 0x12, }, 4, 0, "", "",
292"0f 1b 40 12 \tbndstx %bnd0,0x12(%eax)",},
293{{0x0f, 0x1b, 0x45, 0x12, }, 4, 0, "", "",
294"0f 1b 45 12 \tbndstx %bnd0,0x12(%ebp)",},
295{{0x0f, 0x1b, 0x44, 0x01, 0x12, }, 5, 0, "", "",
296"0f 1b 44 01 12 \tbndstx %bnd0,0x12(%ecx,%eax,1)",},
297{{0x0f, 0x1b, 0x44, 0x05, 0x12, }, 5, 0, "", "",
298"0f 1b 44 05 12 \tbndstx %bnd0,0x12(%ebp,%eax,1)",},
299{{0x0f, 0x1b, 0x44, 0x08, 0x12, }, 5, 0, "", "",
300"0f 1b 44 08 12 \tbndstx %bnd0,0x12(%eax,%ecx,1)",},
301{{0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
302"0f 1b 80 78 56 34 12 \tbndstx %bnd0,0x12345678(%eax)",},
303{{0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
304"0f 1b 85 78 56 34 12 \tbndstx %bnd0,0x12345678(%ebp)",},
305{{0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
306"0f 1b 84 01 78 56 34 12 \tbndstx %bnd0,0x12345678(%ecx,%eax,1)",},
307{{0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
308"0f 1b 84 05 78 56 34 12 \tbndstx %bnd0,0x12345678(%ebp,%eax,1)",},
309{{0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
310"0f 1b 84 08 78 56 34 12 \tbndstx %bnd0,0x12345678(%eax,%ecx,1)",},
311{{0xf2, 0xe8, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "call", "unconditional",
312"f2 e8 fc ff ff ff \tbnd call 3c3 <main+0x3c3>",},
313{{0xf2, 0xff, 0x10, }, 3, 0, "call", "indirect",
314"f2 ff 10 \tbnd call *(%eax)",},
315{{0xf2, 0xc3, }, 2, 0, "ret", "indirect",
316"f2 c3 \tbnd ret ",},
317{{0xf2, 0xe9, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "jmp", "unconditional",
318"f2 e9 fc ff ff ff \tbnd jmp 3ce <main+0x3ce>",},
319{{0xf2, 0xe9, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "jmp", "unconditional",
320"f2 e9 fc ff ff ff \tbnd jmp 3d4 <main+0x3d4>",},
321{{0xf2, 0xff, 0x21, }, 3, 0, "jmp", "indirect",
322"f2 ff 21 \tbnd jmp *(%ecx)",},
323{{0xf2, 0x0f, 0x85, 0xfc, 0xff, 0xff, 0xff, }, 7, 0xfffffffc, "jcc", "conditional",
324"f2 0f 85 fc ff ff ff \tbnd jne 3de <main+0x3de>",},
325{{0x0f, 0x3a, 0xcc, 0xc1, 0x00, }, 5, 0, "", "",
326"0f 3a cc c1 00 \tsha1rnds4 $0x0,%xmm1,%xmm0",},
327{{0x0f, 0x3a, 0xcc, 0xd7, 0x91, }, 5, 0, "", "",
328"0f 3a cc d7 91 \tsha1rnds4 $0x91,%xmm7,%xmm2",},
329{{0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 5, 0, "", "",
330"0f 3a cc 00 91 \tsha1rnds4 $0x91,(%eax),%xmm0",},
331{{0x0f, 0x3a, 0xcc, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
332"0f 3a cc 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678,%xmm0",},
333{{0x0f, 0x3a, 0xcc, 0x18, 0x91, }, 5, 0, "", "",
334"0f 3a cc 18 91 \tsha1rnds4 $0x91,(%eax),%xmm3",},
335{{0x0f, 0x3a, 0xcc, 0x04, 0x01, 0x91, }, 6, 0, "", "",
336"0f 3a cc 04 01 91 \tsha1rnds4 $0x91,(%ecx,%eax,1),%xmm0",},
337{{0x0f, 0x3a, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
338"0f 3a cc 04 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(,%eax,1),%xmm0",},
339{{0x0f, 0x3a, 0xcc, 0x04, 0x08, 0x91, }, 6, 0, "", "",
340"0f 3a cc 04 08 91 \tsha1rnds4 $0x91,(%eax,%ecx,1),%xmm0",},
341{{0x0f, 0x3a, 0xcc, 0x04, 0xc8, 0x91, }, 6, 0, "", "",
342"0f 3a cc 04 c8 91 \tsha1rnds4 $0x91,(%eax,%ecx,8),%xmm0",},
343{{0x0f, 0x3a, 0xcc, 0x40, 0x12, 0x91, }, 6, 0, "", "",
344"0f 3a cc 40 12 91 \tsha1rnds4 $0x91,0x12(%eax),%xmm0",},
345{{0x0f, 0x3a, 0xcc, 0x45, 0x12, 0x91, }, 6, 0, "", "",
346"0f 3a cc 45 12 91 \tsha1rnds4 $0x91,0x12(%ebp),%xmm0",},
347{{0x0f, 0x3a, 0xcc, 0x44, 0x01, 0x12, 0x91, }, 7, 0, "", "",
348"0f 3a cc 44 01 12 91 \tsha1rnds4 $0x91,0x12(%ecx,%eax,1),%xmm0",},
349{{0x0f, 0x3a, 0xcc, 0x44, 0x05, 0x12, 0x91, }, 7, 0, "", "",
350"0f 3a cc 44 05 12 91 \tsha1rnds4 $0x91,0x12(%ebp,%eax,1),%xmm0",},
351{{0x0f, 0x3a, 0xcc, 0x44, 0x08, 0x12, 0x91, }, 7, 0, "", "",
352"0f 3a cc 44 08 12 91 \tsha1rnds4 $0x91,0x12(%eax,%ecx,1),%xmm0",},
353{{0x0f, 0x3a, 0xcc, 0x44, 0xc8, 0x12, 0x91, }, 7, 0, "", "",
354"0f 3a cc 44 c8 12 91 \tsha1rnds4 $0x91,0x12(%eax,%ecx,8),%xmm0",},
355{{0x0f, 0x3a, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
356"0f 3a cc 80 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax),%xmm0",},
357{{0x0f, 0x3a, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
358"0f 3a cc 85 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ebp),%xmm0",},
359{{0x0f, 0x3a, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
360"0f 3a cc 84 01 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ecx,%eax,1),%xmm0",},
361{{0x0f, 0x3a, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
362"0f 3a cc 84 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ebp,%eax,1),%xmm0",},
363{{0x0f, 0x3a, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
364"0f 3a cc 84 08 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax,%ecx,1),%xmm0",},
365{{0x0f, 0x3a, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
366"0f 3a cc 84 c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax,%ecx,8),%xmm0",},
367{{0x0f, 0x38, 0xc8, 0xc1, }, 4, 0, "", "",
368"0f 38 c8 c1 \tsha1nexte %xmm1,%xmm0",},
369{{0x0f, 0x38, 0xc8, 0xd7, }, 4, 0, "", "",
370"0f 38 c8 d7 \tsha1nexte %xmm7,%xmm2",},
371{{0x0f, 0x38, 0xc8, 0x00, }, 4, 0, "", "",
372"0f 38 c8 00 \tsha1nexte (%eax),%xmm0",},
373{{0x0f, 0x38, 0xc8, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
374"0f 38 c8 05 78 56 34 12 \tsha1nexte 0x12345678,%xmm0",},
375{{0x0f, 0x38, 0xc8, 0x18, }, 4, 0, "", "",
376"0f 38 c8 18 \tsha1nexte (%eax),%xmm3",},
377{{0x0f, 0x38, 0xc8, 0x04, 0x01, }, 5, 0, "", "",
378"0f 38 c8 04 01 \tsha1nexte (%ecx,%eax,1),%xmm0",},
379{{0x0f, 0x38, 0xc8, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
380"0f 38 c8 04 05 78 56 34 12 \tsha1nexte 0x12345678(,%eax,1),%xmm0",},
381{{0x0f, 0x38, 0xc8, 0x04, 0x08, }, 5, 0, "", "",
382"0f 38 c8 04 08 \tsha1nexte (%eax,%ecx,1),%xmm0",},
383{{0x0f, 0x38, 0xc8, 0x04, 0xc8, }, 5, 0, "", "",
384"0f 38 c8 04 c8 \tsha1nexte (%eax,%ecx,8),%xmm0",},
385{{0x0f, 0x38, 0xc8, 0x40, 0x12, }, 5, 0, "", "",
386"0f 38 c8 40 12 \tsha1nexte 0x12(%eax),%xmm0",},
387{{0x0f, 0x38, 0xc8, 0x45, 0x12, }, 5, 0, "", "",
388"0f 38 c8 45 12 \tsha1nexte 0x12(%ebp),%xmm0",},
389{{0x0f, 0x38, 0xc8, 0x44, 0x01, 0x12, }, 6, 0, "", "",
390"0f 38 c8 44 01 12 \tsha1nexte 0x12(%ecx,%eax,1),%xmm0",},
391{{0x0f, 0x38, 0xc8, 0x44, 0x05, 0x12, }, 6, 0, "", "",
392"0f 38 c8 44 05 12 \tsha1nexte 0x12(%ebp,%eax,1),%xmm0",},
393{{0x0f, 0x38, 0xc8, 0x44, 0x08, 0x12, }, 6, 0, "", "",
394"0f 38 c8 44 08 12 \tsha1nexte 0x12(%eax,%ecx,1),%xmm0",},
395{{0x0f, 0x38, 0xc8, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
396"0f 38 c8 44 c8 12 \tsha1nexte 0x12(%eax,%ecx,8),%xmm0",},
397{{0x0f, 0x38, 0xc8, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
398"0f 38 c8 80 78 56 34 12 \tsha1nexte 0x12345678(%eax),%xmm0",},
399{{0x0f, 0x38, 0xc8, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
400"0f 38 c8 85 78 56 34 12 \tsha1nexte 0x12345678(%ebp),%xmm0",},
401{{0x0f, 0x38, 0xc8, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
402"0f 38 c8 84 01 78 56 34 12 \tsha1nexte 0x12345678(%ecx,%eax,1),%xmm0",},
403{{0x0f, 0x38, 0xc8, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
404"0f 38 c8 84 05 78 56 34 12 \tsha1nexte 0x12345678(%ebp,%eax,1),%xmm0",},
405{{0x0f, 0x38, 0xc8, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
406"0f 38 c8 84 08 78 56 34 12 \tsha1nexte 0x12345678(%eax,%ecx,1),%xmm0",},
407{{0x0f, 0x38, 0xc8, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
408"0f 38 c8 84 c8 78 56 34 12 \tsha1nexte 0x12345678(%eax,%ecx,8),%xmm0",},
409{{0x0f, 0x38, 0xc9, 0xc1, }, 4, 0, "", "",
410"0f 38 c9 c1 \tsha1msg1 %xmm1,%xmm0",},
411{{0x0f, 0x38, 0xc9, 0xd7, }, 4, 0, "", "",
412"0f 38 c9 d7 \tsha1msg1 %xmm7,%xmm2",},
413{{0x0f, 0x38, 0xc9, 0x00, }, 4, 0, "", "",
414"0f 38 c9 00 \tsha1msg1 (%eax),%xmm0",},
415{{0x0f, 0x38, 0xc9, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
416"0f 38 c9 05 78 56 34 12 \tsha1msg1 0x12345678,%xmm0",},
417{{0x0f, 0x38, 0xc9, 0x18, }, 4, 0, "", "",
418"0f 38 c9 18 \tsha1msg1 (%eax),%xmm3",},
419{{0x0f, 0x38, 0xc9, 0x04, 0x01, }, 5, 0, "", "",
420"0f 38 c9 04 01 \tsha1msg1 (%ecx,%eax,1),%xmm0",},
421{{0x0f, 0x38, 0xc9, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
422"0f 38 c9 04 05 78 56 34 12 \tsha1msg1 0x12345678(,%eax,1),%xmm0",},
423{{0x0f, 0x38, 0xc9, 0x04, 0x08, }, 5, 0, "", "",
424"0f 38 c9 04 08 \tsha1msg1 (%eax,%ecx,1),%xmm0",},
425{{0x0f, 0x38, 0xc9, 0x04, 0xc8, }, 5, 0, "", "",
426"0f 38 c9 04 c8 \tsha1msg1 (%eax,%ecx,8),%xmm0",},
427{{0x0f, 0x38, 0xc9, 0x40, 0x12, }, 5, 0, "", "",
428"0f 38 c9 40 12 \tsha1msg1 0x12(%eax),%xmm0",},
429{{0x0f, 0x38, 0xc9, 0x45, 0x12, }, 5, 0, "", "",
430"0f 38 c9 45 12 \tsha1msg1 0x12(%ebp),%xmm0",},
431{{0x0f, 0x38, 0xc9, 0x44, 0x01, 0x12, }, 6, 0, "", "",
432"0f 38 c9 44 01 12 \tsha1msg1 0x12(%ecx,%eax,1),%xmm0",},
433{{0x0f, 0x38, 0xc9, 0x44, 0x05, 0x12, }, 6, 0, "", "",
434"0f 38 c9 44 05 12 \tsha1msg1 0x12(%ebp,%eax,1),%xmm0",},
435{{0x0f, 0x38, 0xc9, 0x44, 0x08, 0x12, }, 6, 0, "", "",
436"0f 38 c9 44 08 12 \tsha1msg1 0x12(%eax,%ecx,1),%xmm0",},
437{{0x0f, 0x38, 0xc9, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
438"0f 38 c9 44 c8 12 \tsha1msg1 0x12(%eax,%ecx,8),%xmm0",},
439{{0x0f, 0x38, 0xc9, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
440"0f 38 c9 80 78 56 34 12 \tsha1msg1 0x12345678(%eax),%xmm0",},
441{{0x0f, 0x38, 0xc9, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
442"0f 38 c9 85 78 56 34 12 \tsha1msg1 0x12345678(%ebp),%xmm0",},
443{{0x0f, 0x38, 0xc9, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
444"0f 38 c9 84 01 78 56 34 12 \tsha1msg1 0x12345678(%ecx,%eax,1),%xmm0",},
445{{0x0f, 0x38, 0xc9, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
446"0f 38 c9 84 05 78 56 34 12 \tsha1msg1 0x12345678(%ebp,%eax,1),%xmm0",},
447{{0x0f, 0x38, 0xc9, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
448"0f 38 c9 84 08 78 56 34 12 \tsha1msg1 0x12345678(%eax,%ecx,1),%xmm0",},
449{{0x0f, 0x38, 0xc9, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
450"0f 38 c9 84 c8 78 56 34 12 \tsha1msg1 0x12345678(%eax,%ecx,8),%xmm0",},
451{{0x0f, 0x38, 0xca, 0xc1, }, 4, 0, "", "",
452"0f 38 ca c1 \tsha1msg2 %xmm1,%xmm0",},
453{{0x0f, 0x38, 0xca, 0xd7, }, 4, 0, "", "",
454"0f 38 ca d7 \tsha1msg2 %xmm7,%xmm2",},
455{{0x0f, 0x38, 0xca, 0x00, }, 4, 0, "", "",
456"0f 38 ca 00 \tsha1msg2 (%eax),%xmm0",},
457{{0x0f, 0x38, 0xca, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
458"0f 38 ca 05 78 56 34 12 \tsha1msg2 0x12345678,%xmm0",},
459{{0x0f, 0x38, 0xca, 0x18, }, 4, 0, "", "",
460"0f 38 ca 18 \tsha1msg2 (%eax),%xmm3",},
461{{0x0f, 0x38, 0xca, 0x04, 0x01, }, 5, 0, "", "",
462"0f 38 ca 04 01 \tsha1msg2 (%ecx,%eax,1),%xmm0",},
463{{0x0f, 0x38, 0xca, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
464"0f 38 ca 04 05 78 56 34 12 \tsha1msg2 0x12345678(,%eax,1),%xmm0",},
465{{0x0f, 0x38, 0xca, 0x04, 0x08, }, 5, 0, "", "",
466"0f 38 ca 04 08 \tsha1msg2 (%eax,%ecx,1),%xmm0",},
467{{0x0f, 0x38, 0xca, 0x04, 0xc8, }, 5, 0, "", "",
468"0f 38 ca 04 c8 \tsha1msg2 (%eax,%ecx,8),%xmm0",},
469{{0x0f, 0x38, 0xca, 0x40, 0x12, }, 5, 0, "", "",
470"0f 38 ca 40 12 \tsha1msg2 0x12(%eax),%xmm0",},
471{{0x0f, 0x38, 0xca, 0x45, 0x12, }, 5, 0, "", "",
472"0f 38 ca 45 12 \tsha1msg2 0x12(%ebp),%xmm0",},
473{{0x0f, 0x38, 0xca, 0x44, 0x01, 0x12, }, 6, 0, "", "",
474"0f 38 ca 44 01 12 \tsha1msg2 0x12(%ecx,%eax,1),%xmm0",},
475{{0x0f, 0x38, 0xca, 0x44, 0x05, 0x12, }, 6, 0, "", "",
476"0f 38 ca 44 05 12 \tsha1msg2 0x12(%ebp,%eax,1),%xmm0",},
477{{0x0f, 0x38, 0xca, 0x44, 0x08, 0x12, }, 6, 0, "", "",
478"0f 38 ca 44 08 12 \tsha1msg2 0x12(%eax,%ecx,1),%xmm0",},
479{{0x0f, 0x38, 0xca, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
480"0f 38 ca 44 c8 12 \tsha1msg2 0x12(%eax,%ecx,8),%xmm0",},
481{{0x0f, 0x38, 0xca, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
482"0f 38 ca 80 78 56 34 12 \tsha1msg2 0x12345678(%eax),%xmm0",},
483{{0x0f, 0x38, 0xca, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
484"0f 38 ca 85 78 56 34 12 \tsha1msg2 0x12345678(%ebp),%xmm0",},
485{{0x0f, 0x38, 0xca, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
486"0f 38 ca 84 01 78 56 34 12 \tsha1msg2 0x12345678(%ecx,%eax,1),%xmm0",},
487{{0x0f, 0x38, 0xca, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
488"0f 38 ca 84 05 78 56 34 12 \tsha1msg2 0x12345678(%ebp,%eax,1),%xmm0",},
489{{0x0f, 0x38, 0xca, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
490"0f 38 ca 84 08 78 56 34 12 \tsha1msg2 0x12345678(%eax,%ecx,1),%xmm0",},
491{{0x0f, 0x38, 0xca, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
492"0f 38 ca 84 c8 78 56 34 12 \tsha1msg2 0x12345678(%eax,%ecx,8),%xmm0",},
493{{0x0f, 0x38, 0xcb, 0xcc, }, 4, 0, "", "",
494"0f 38 cb cc \tsha256rnds2 %xmm0,%xmm4,%xmm1",},
495{{0x0f, 0x38, 0xcb, 0xd7, }, 4, 0, "", "",
496"0f 38 cb d7 \tsha256rnds2 %xmm0,%xmm7,%xmm2",},
497{{0x0f, 0x38, 0xcb, 0x08, }, 4, 0, "", "",
498"0f 38 cb 08 \tsha256rnds2 %xmm0,(%eax),%xmm1",},
499{{0x0f, 0x38, 0xcb, 0x0d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
500"0f 38 cb 0d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678,%xmm1",},
501{{0x0f, 0x38, 0xcb, 0x18, }, 4, 0, "", "",
502"0f 38 cb 18 \tsha256rnds2 %xmm0,(%eax),%xmm3",},
503{{0x0f, 0x38, 0xcb, 0x0c, 0x01, }, 5, 0, "", "",
504"0f 38 cb 0c 01 \tsha256rnds2 %xmm0,(%ecx,%eax,1),%xmm1",},
505{{0x0f, 0x38, 0xcb, 0x0c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
506"0f 38 cb 0c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(,%eax,1),%xmm1",},
507{{0x0f, 0x38, 0xcb, 0x0c, 0x08, }, 5, 0, "", "",
508"0f 38 cb 0c 08 \tsha256rnds2 %xmm0,(%eax,%ecx,1),%xmm1",},
509{{0x0f, 0x38, 0xcb, 0x0c, 0xc8, }, 5, 0, "", "",
510"0f 38 cb 0c c8 \tsha256rnds2 %xmm0,(%eax,%ecx,8),%xmm1",},
511{{0x0f, 0x38, 0xcb, 0x48, 0x12, }, 5, 0, "", "",
512"0f 38 cb 48 12 \tsha256rnds2 %xmm0,0x12(%eax),%xmm1",},
513{{0x0f, 0x38, 0xcb, 0x4d, 0x12, }, 5, 0, "", "",
514"0f 38 cb 4d 12 \tsha256rnds2 %xmm0,0x12(%ebp),%xmm1",},
515{{0x0f, 0x38, 0xcb, 0x4c, 0x01, 0x12, }, 6, 0, "", "",
516"0f 38 cb 4c 01 12 \tsha256rnds2 %xmm0,0x12(%ecx,%eax,1),%xmm1",},
517{{0x0f, 0x38, 0xcb, 0x4c, 0x05, 0x12, }, 6, 0, "", "",
518"0f 38 cb 4c 05 12 \tsha256rnds2 %xmm0,0x12(%ebp,%eax,1),%xmm1",},
519{{0x0f, 0x38, 0xcb, 0x4c, 0x08, 0x12, }, 6, 0, "", "",
520"0f 38 cb 4c 08 12 \tsha256rnds2 %xmm0,0x12(%eax,%ecx,1),%xmm1",},
521{{0x0f, 0x38, 0xcb, 0x4c, 0xc8, 0x12, }, 6, 0, "", "",
522"0f 38 cb 4c c8 12 \tsha256rnds2 %xmm0,0x12(%eax,%ecx,8),%xmm1",},
523{{0x0f, 0x38, 0xcb, 0x88, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
524"0f 38 cb 88 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax),%xmm1",},
525{{0x0f, 0x38, 0xcb, 0x8d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
526"0f 38 cb 8d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ebp),%xmm1",},
527{{0x0f, 0x38, 0xcb, 0x8c, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
528"0f 38 cb 8c 01 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ecx,%eax,1),%xmm1",},
529{{0x0f, 0x38, 0xcb, 0x8c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
530"0f 38 cb 8c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ebp,%eax,1),%xmm1",},
531{{0x0f, 0x38, 0xcb, 0x8c, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
532"0f 38 cb 8c 08 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax,%ecx,1),%xmm1",},
533{{0x0f, 0x38, 0xcb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
534"0f 38 cb 8c c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax,%ecx,8),%xmm1",},
535{{0x0f, 0x38, 0xcc, 0xc1, }, 4, 0, "", "",
536"0f 38 cc c1 \tsha256msg1 %xmm1,%xmm0",},
537{{0x0f, 0x38, 0xcc, 0xd7, }, 4, 0, "", "",
538"0f 38 cc d7 \tsha256msg1 %xmm7,%xmm2",},
539{{0x0f, 0x38, 0xcc, 0x00, }, 4, 0, "", "",
540"0f 38 cc 00 \tsha256msg1 (%eax),%xmm0",},
541{{0x0f, 0x38, 0xcc, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
542"0f 38 cc 05 78 56 34 12 \tsha256msg1 0x12345678,%xmm0",},
543{{0x0f, 0x38, 0xcc, 0x18, }, 4, 0, "", "",
544"0f 38 cc 18 \tsha256msg1 (%eax),%xmm3",},
545{{0x0f, 0x38, 0xcc, 0x04, 0x01, }, 5, 0, "", "",
546"0f 38 cc 04 01 \tsha256msg1 (%ecx,%eax,1),%xmm0",},
547{{0x0f, 0x38, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
548"0f 38 cc 04 05 78 56 34 12 \tsha256msg1 0x12345678(,%eax,1),%xmm0",},
549{{0x0f, 0x38, 0xcc, 0x04, 0x08, }, 5, 0, "", "",
550"0f 38 cc 04 08 \tsha256msg1 (%eax,%ecx,1),%xmm0",},
551{{0x0f, 0x38, 0xcc, 0x04, 0xc8, }, 5, 0, "", "",
552"0f 38 cc 04 c8 \tsha256msg1 (%eax,%ecx,8),%xmm0",},
553{{0x0f, 0x38, 0xcc, 0x40, 0x12, }, 5, 0, "", "",
554"0f 38 cc 40 12 \tsha256msg1 0x12(%eax),%xmm0",},
555{{0x0f, 0x38, 0xcc, 0x45, 0x12, }, 5, 0, "", "",
556"0f 38 cc 45 12 \tsha256msg1 0x12(%ebp),%xmm0",},
557{{0x0f, 0x38, 0xcc, 0x44, 0x01, 0x12, }, 6, 0, "", "",
558"0f 38 cc 44 01 12 \tsha256msg1 0x12(%ecx,%eax,1),%xmm0",},
559{{0x0f, 0x38, 0xcc, 0x44, 0x05, 0x12, }, 6, 0, "", "",
560"0f 38 cc 44 05 12 \tsha256msg1 0x12(%ebp,%eax,1),%xmm0",},
561{{0x0f, 0x38, 0xcc, 0x44, 0x08, 0x12, }, 6, 0, "", "",
562"0f 38 cc 44 08 12 \tsha256msg1 0x12(%eax,%ecx,1),%xmm0",},
563{{0x0f, 0x38, 0xcc, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
564"0f 38 cc 44 c8 12 \tsha256msg1 0x12(%eax,%ecx,8),%xmm0",},
565{{0x0f, 0x38, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
566"0f 38 cc 80 78 56 34 12 \tsha256msg1 0x12345678(%eax),%xmm0",},
567{{0x0f, 0x38, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
568"0f 38 cc 85 78 56 34 12 \tsha256msg1 0x12345678(%ebp),%xmm0",},
569{{0x0f, 0x38, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
570"0f 38 cc 84 01 78 56 34 12 \tsha256msg1 0x12345678(%ecx,%eax,1),%xmm0",},
571{{0x0f, 0x38, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
572"0f 38 cc 84 05 78 56 34 12 \tsha256msg1 0x12345678(%ebp,%eax,1),%xmm0",},
573{{0x0f, 0x38, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
574"0f 38 cc 84 08 78 56 34 12 \tsha256msg1 0x12345678(%eax,%ecx,1),%xmm0",},
575{{0x0f, 0x38, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
576"0f 38 cc 84 c8 78 56 34 12 \tsha256msg1 0x12345678(%eax,%ecx,8),%xmm0",},
577{{0x0f, 0x38, 0xcd, 0xc1, }, 4, 0, "", "",
578"0f 38 cd c1 \tsha256msg2 %xmm1,%xmm0",},
579{{0x0f, 0x38, 0xcd, 0xd7, }, 4, 0, "", "",
580"0f 38 cd d7 \tsha256msg2 %xmm7,%xmm2",},
581{{0x0f, 0x38, 0xcd, 0x00, }, 4, 0, "", "",
582"0f 38 cd 00 \tsha256msg2 (%eax),%xmm0",},
583{{0x0f, 0x38, 0xcd, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
584"0f 38 cd 05 78 56 34 12 \tsha256msg2 0x12345678,%xmm0",},
585{{0x0f, 0x38, 0xcd, 0x18, }, 4, 0, "", "",
586"0f 38 cd 18 \tsha256msg2 (%eax),%xmm3",},
587{{0x0f, 0x38, 0xcd, 0x04, 0x01, }, 5, 0, "", "",
588"0f 38 cd 04 01 \tsha256msg2 (%ecx,%eax,1),%xmm0",},
589{{0x0f, 0x38, 0xcd, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
590"0f 38 cd 04 05 78 56 34 12 \tsha256msg2 0x12345678(,%eax,1),%xmm0",},
591{{0x0f, 0x38, 0xcd, 0x04, 0x08, }, 5, 0, "", "",
592"0f 38 cd 04 08 \tsha256msg2 (%eax,%ecx,1),%xmm0",},
593{{0x0f, 0x38, 0xcd, 0x04, 0xc8, }, 5, 0, "", "",
594"0f 38 cd 04 c8 \tsha256msg2 (%eax,%ecx,8),%xmm0",},
595{{0x0f, 0x38, 0xcd, 0x40, 0x12, }, 5, 0, "", "",
596"0f 38 cd 40 12 \tsha256msg2 0x12(%eax),%xmm0",},
597{{0x0f, 0x38, 0xcd, 0x45, 0x12, }, 5, 0, "", "",
598"0f 38 cd 45 12 \tsha256msg2 0x12(%ebp),%xmm0",},
599{{0x0f, 0x38, 0xcd, 0x44, 0x01, 0x12, }, 6, 0, "", "",
600"0f 38 cd 44 01 12 \tsha256msg2 0x12(%ecx,%eax,1),%xmm0",},
601{{0x0f, 0x38, 0xcd, 0x44, 0x05, 0x12, }, 6, 0, "", "",
602"0f 38 cd 44 05 12 \tsha256msg2 0x12(%ebp,%eax,1),%xmm0",},
603{{0x0f, 0x38, 0xcd, 0x44, 0x08, 0x12, }, 6, 0, "", "",
604"0f 38 cd 44 08 12 \tsha256msg2 0x12(%eax,%ecx,1),%xmm0",},
605{{0x0f, 0x38, 0xcd, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
606"0f 38 cd 44 c8 12 \tsha256msg2 0x12(%eax,%ecx,8),%xmm0",},
607{{0x0f, 0x38, 0xcd, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
608"0f 38 cd 80 78 56 34 12 \tsha256msg2 0x12345678(%eax),%xmm0",},
609{{0x0f, 0x38, 0xcd, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
610"0f 38 cd 85 78 56 34 12 \tsha256msg2 0x12345678(%ebp),%xmm0",},
611{{0x0f, 0x38, 0xcd, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
612"0f 38 cd 84 01 78 56 34 12 \tsha256msg2 0x12345678(%ecx,%eax,1),%xmm0",},
613{{0x0f, 0x38, 0xcd, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
614"0f 38 cd 84 05 78 56 34 12 \tsha256msg2 0x12345678(%ebp,%eax,1),%xmm0",},
615{{0x0f, 0x38, 0xcd, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
616"0f 38 cd 84 08 78 56 34 12 \tsha256msg2 0x12345678(%eax,%ecx,1),%xmm0",},
617{{0x0f, 0x38, 0xcd, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
618"0f 38 cd 84 c8 78 56 34 12 \tsha256msg2 0x12345678(%eax,%ecx,8),%xmm0",},
619{{0x66, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
620"66 0f ae 38 \tclflushopt (%eax)",},
621{{0x66, 0x0f, 0xae, 0x3d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
622"66 0f ae 3d 78 56 34 12 \tclflushopt 0x12345678",},
623{{0x66, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
624"66 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%eax,%ecx,8)",},
625{{0x0f, 0xae, 0x38, }, 3, 0, "", "",
626"0f ae 38 \tclflush (%eax)",},
627{{0x0f, 0xae, 0xf8, }, 3, 0, "", "",
628"0f ae f8 \tsfence ",},
629{{0x66, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
630"66 0f ae 30 \tclwb (%eax)",},
631{{0x66, 0x0f, 0xae, 0x35, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
632"66 0f ae 35 78 56 34 12 \tclwb 0x12345678",},
633{{0x66, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
634"66 0f ae b4 c8 78 56 34 12 \tclwb 0x12345678(%eax,%ecx,8)",},
635{{0x0f, 0xae, 0x30, }, 3, 0, "", "",
636"0f ae 30 \txsaveopt (%eax)",},
637{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
638"0f ae f0 \tmfence ",},
639{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
640"0f c7 20 \txsavec (%eax)",},
641{{0x0f, 0xc7, 0x25, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
642"0f c7 25 78 56 34 12 \txsavec 0x12345678",},
643{{0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
644"0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%eax,%ecx,8)",},
645{{0x0f, 0xc7, 0x28, }, 3, 0, "", "",
646"0f c7 28 \txsaves (%eax)",},
647{{0x0f, 0xc7, 0x2d, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
648"0f c7 2d 78 56 34 12 \txsaves 0x12345678",},
649{{0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
650"0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%eax,%ecx,8)",},
651{{0x0f, 0xc7, 0x18, }, 3, 0, "", "",
652"0f c7 18 \txrstors (%eax)",},
653{{0x0f, 0xc7, 0x1d, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
654"0f c7 1d 78 56 34 12 \txrstors 0x12345678",},
655{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
656"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%eax,%ecx,8)",},
657{{0x66, 0x0f, 0xae, 0xf8, }, 4, 0, "", "",
658"66 0f ae f8 \tpcommit ",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
new file mode 100644
index 000000000000..4fe7cce179c4
--- /dev/null
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
@@ -0,0 +1,768 @@
1/*
2 * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk
3 * from insn-x86-dat-src.c for inclusion by insn-x86.c
4 * Do not change this code.
5*/
6
7{{0x0f, 0x31, }, 2, 0, "", "",
8"0f 31 \trdtsc ",},
9{{0xf3, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
10"f3 0f 1b 00 \tbndmk (%rax),%bnd0",},
11{{0xf3, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
12"f3 41 0f 1b 00 \tbndmk (%r8),%bnd0",},
13{{0xf3, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
14"f3 0f 1b 04 25 78 56 34 12 \tbndmk 0x12345678,%bnd0",},
15{{0xf3, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
16"f3 0f 1b 18 \tbndmk (%rax),%bnd3",},
17{{0xf3, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
18"f3 0f 1b 04 01 \tbndmk (%rcx,%rax,1),%bnd0",},
19{{0xf3, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
20"f3 0f 1b 04 05 78 56 34 12 \tbndmk 0x12345678(,%rax,1),%bnd0",},
21{{0xf3, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
22"f3 0f 1b 04 08 \tbndmk (%rax,%rcx,1),%bnd0",},
23{{0xf3, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
24"f3 0f 1b 04 c8 \tbndmk (%rax,%rcx,8),%bnd0",},
25{{0xf3, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
26"f3 0f 1b 40 12 \tbndmk 0x12(%rax),%bnd0",},
27{{0xf3, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
28"f3 0f 1b 45 12 \tbndmk 0x12(%rbp),%bnd0",},
29{{0xf3, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
30"f3 0f 1b 44 01 12 \tbndmk 0x12(%rcx,%rax,1),%bnd0",},
31{{0xf3, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
32"f3 0f 1b 44 05 12 \tbndmk 0x12(%rbp,%rax,1),%bnd0",},
33{{0xf3, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
34"f3 0f 1b 44 08 12 \tbndmk 0x12(%rax,%rcx,1),%bnd0",},
35{{0xf3, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
36"f3 0f 1b 44 c8 12 \tbndmk 0x12(%rax,%rcx,8),%bnd0",},
37{{0xf3, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
38"f3 0f 1b 80 78 56 34 12 \tbndmk 0x12345678(%rax),%bnd0",},
39{{0xf3, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
40"f3 0f 1b 85 78 56 34 12 \tbndmk 0x12345678(%rbp),%bnd0",},
41{{0xf3, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
42"f3 0f 1b 84 01 78 56 34 12 \tbndmk 0x12345678(%rcx,%rax,1),%bnd0",},
43{{0xf3, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
44"f3 0f 1b 84 05 78 56 34 12 \tbndmk 0x12345678(%rbp,%rax,1),%bnd0",},
45{{0xf3, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
46"f3 0f 1b 84 08 78 56 34 12 \tbndmk 0x12345678(%rax,%rcx,1),%bnd0",},
47{{0xf3, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
48"f3 0f 1b 84 c8 78 56 34 12 \tbndmk 0x12345678(%rax,%rcx,8),%bnd0",},
49{{0xf3, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
50"f3 0f 1a 00 \tbndcl (%rax),%bnd0",},
51{{0xf3, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
52"f3 41 0f 1a 00 \tbndcl (%r8),%bnd0",},
53{{0xf3, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
54"f3 0f 1a 04 25 78 56 34 12 \tbndcl 0x12345678,%bnd0",},
55{{0xf3, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
56"f3 0f 1a 18 \tbndcl (%rax),%bnd3",},
57{{0xf3, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
58"f3 0f 1a 04 01 \tbndcl (%rcx,%rax,1),%bnd0",},
59{{0xf3, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
60"f3 0f 1a 04 05 78 56 34 12 \tbndcl 0x12345678(,%rax,1),%bnd0",},
61{{0xf3, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
62"f3 0f 1a 04 08 \tbndcl (%rax,%rcx,1),%bnd0",},
63{{0xf3, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
64"f3 0f 1a 04 c8 \tbndcl (%rax,%rcx,8),%bnd0",},
65{{0xf3, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
66"f3 0f 1a 40 12 \tbndcl 0x12(%rax),%bnd0",},
67{{0xf3, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
68"f3 0f 1a 45 12 \tbndcl 0x12(%rbp),%bnd0",},
69{{0xf3, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
70"f3 0f 1a 44 01 12 \tbndcl 0x12(%rcx,%rax,1),%bnd0",},
71{{0xf3, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
72"f3 0f 1a 44 05 12 \tbndcl 0x12(%rbp,%rax,1),%bnd0",},
73{{0xf3, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
74"f3 0f 1a 44 08 12 \tbndcl 0x12(%rax,%rcx,1),%bnd0",},
75{{0xf3, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
76"f3 0f 1a 44 c8 12 \tbndcl 0x12(%rax,%rcx,8),%bnd0",},
77{{0xf3, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
78"f3 0f 1a 80 78 56 34 12 \tbndcl 0x12345678(%rax),%bnd0",},
79{{0xf3, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
80"f3 0f 1a 85 78 56 34 12 \tbndcl 0x12345678(%rbp),%bnd0",},
81{{0xf3, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
82"f3 0f 1a 84 01 78 56 34 12 \tbndcl 0x12345678(%rcx,%rax,1),%bnd0",},
83{{0xf3, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
84"f3 0f 1a 84 05 78 56 34 12 \tbndcl 0x12345678(%rbp,%rax,1),%bnd0",},
85{{0xf3, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
86"f3 0f 1a 84 08 78 56 34 12 \tbndcl 0x12345678(%rax,%rcx,1),%bnd0",},
87{{0xf3, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
88"f3 0f 1a 84 c8 78 56 34 12 \tbndcl 0x12345678(%rax,%rcx,8),%bnd0",},
89{{0xf3, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
90"f3 0f 1a c0 \tbndcl %rax,%bnd0",},
91{{0xf2, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
92"f2 0f 1a 00 \tbndcu (%rax),%bnd0",},
93{{0xf2, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
94"f2 41 0f 1a 00 \tbndcu (%r8),%bnd0",},
95{{0xf2, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
96"f2 0f 1a 04 25 78 56 34 12 \tbndcu 0x12345678,%bnd0",},
97{{0xf2, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
98"f2 0f 1a 18 \tbndcu (%rax),%bnd3",},
99{{0xf2, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
100"f2 0f 1a 04 01 \tbndcu (%rcx,%rax,1),%bnd0",},
101{{0xf2, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
102"f2 0f 1a 04 05 78 56 34 12 \tbndcu 0x12345678(,%rax,1),%bnd0",},
103{{0xf2, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
104"f2 0f 1a 04 08 \tbndcu (%rax,%rcx,1),%bnd0",},
105{{0xf2, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
106"f2 0f 1a 04 c8 \tbndcu (%rax,%rcx,8),%bnd0",},
107{{0xf2, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
108"f2 0f 1a 40 12 \tbndcu 0x12(%rax),%bnd0",},
109{{0xf2, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
110"f2 0f 1a 45 12 \tbndcu 0x12(%rbp),%bnd0",},
111{{0xf2, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
112"f2 0f 1a 44 01 12 \tbndcu 0x12(%rcx,%rax,1),%bnd0",},
113{{0xf2, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
114"f2 0f 1a 44 05 12 \tbndcu 0x12(%rbp,%rax,1),%bnd0",},
115{{0xf2, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
116"f2 0f 1a 44 08 12 \tbndcu 0x12(%rax,%rcx,1),%bnd0",},
117{{0xf2, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
118"f2 0f 1a 44 c8 12 \tbndcu 0x12(%rax,%rcx,8),%bnd0",},
119{{0xf2, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
120"f2 0f 1a 80 78 56 34 12 \tbndcu 0x12345678(%rax),%bnd0",},
121{{0xf2, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
122"f2 0f 1a 85 78 56 34 12 \tbndcu 0x12345678(%rbp),%bnd0",},
123{{0xf2, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
124"f2 0f 1a 84 01 78 56 34 12 \tbndcu 0x12345678(%rcx,%rax,1),%bnd0",},
125{{0xf2, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
126"f2 0f 1a 84 05 78 56 34 12 \tbndcu 0x12345678(%rbp,%rax,1),%bnd0",},
127{{0xf2, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
128"f2 0f 1a 84 08 78 56 34 12 \tbndcu 0x12345678(%rax,%rcx,1),%bnd0",},
129{{0xf2, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
130"f2 0f 1a 84 c8 78 56 34 12 \tbndcu 0x12345678(%rax,%rcx,8),%bnd0",},
131{{0xf2, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
132"f2 0f 1a c0 \tbndcu %rax,%bnd0",},
133{{0xf2, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
134"f2 0f 1b 00 \tbndcn (%rax),%bnd0",},
135{{0xf2, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
136"f2 41 0f 1b 00 \tbndcn (%r8),%bnd0",},
137{{0xf2, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
138"f2 0f 1b 04 25 78 56 34 12 \tbndcn 0x12345678,%bnd0",},
139{{0xf2, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
140"f2 0f 1b 18 \tbndcn (%rax),%bnd3",},
141{{0xf2, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
142"f2 0f 1b 04 01 \tbndcn (%rcx,%rax,1),%bnd0",},
143{{0xf2, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
144"f2 0f 1b 04 05 78 56 34 12 \tbndcn 0x12345678(,%rax,1),%bnd0",},
145{{0xf2, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
146"f2 0f 1b 04 08 \tbndcn (%rax,%rcx,1),%bnd0",},
147{{0xf2, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
148"f2 0f 1b 04 c8 \tbndcn (%rax,%rcx,8),%bnd0",},
149{{0xf2, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
150"f2 0f 1b 40 12 \tbndcn 0x12(%rax),%bnd0",},
151{{0xf2, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
152"f2 0f 1b 45 12 \tbndcn 0x12(%rbp),%bnd0",},
153{{0xf2, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
154"f2 0f 1b 44 01 12 \tbndcn 0x12(%rcx,%rax,1),%bnd0",},
155{{0xf2, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
156"f2 0f 1b 44 05 12 \tbndcn 0x12(%rbp,%rax,1),%bnd0",},
157{{0xf2, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
158"f2 0f 1b 44 08 12 \tbndcn 0x12(%rax,%rcx,1),%bnd0",},
159{{0xf2, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
160"f2 0f 1b 44 c8 12 \tbndcn 0x12(%rax,%rcx,8),%bnd0",},
161{{0xf2, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
162"f2 0f 1b 80 78 56 34 12 \tbndcn 0x12345678(%rax),%bnd0",},
163{{0xf2, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
164"f2 0f 1b 85 78 56 34 12 \tbndcn 0x12345678(%rbp),%bnd0",},
165{{0xf2, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
166"f2 0f 1b 84 01 78 56 34 12 \tbndcn 0x12345678(%rcx,%rax,1),%bnd0",},
167{{0xf2, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
168"f2 0f 1b 84 05 78 56 34 12 \tbndcn 0x12345678(%rbp,%rax,1),%bnd0",},
169{{0xf2, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
170"f2 0f 1b 84 08 78 56 34 12 \tbndcn 0x12345678(%rax,%rcx,1),%bnd0",},
171{{0xf2, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
172"f2 0f 1b 84 c8 78 56 34 12 \tbndcn 0x12345678(%rax,%rcx,8),%bnd0",},
173{{0xf2, 0x0f, 0x1b, 0xc0, }, 4, 0, "", "",
174"f2 0f 1b c0 \tbndcn %rax,%bnd0",},
175{{0x66, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
176"66 0f 1a 00 \tbndmov (%rax),%bnd0",},
177{{0x66, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
178"66 41 0f 1a 00 \tbndmov (%r8),%bnd0",},
179{{0x66, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
180"66 0f 1a 04 25 78 56 34 12 \tbndmov 0x12345678,%bnd0",},
181{{0x66, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
182"66 0f 1a 18 \tbndmov (%rax),%bnd3",},
183{{0x66, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
184"66 0f 1a 04 01 \tbndmov (%rcx,%rax,1),%bnd0",},
185{{0x66, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
186"66 0f 1a 04 05 78 56 34 12 \tbndmov 0x12345678(,%rax,1),%bnd0",},
187{{0x66, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
188"66 0f 1a 04 08 \tbndmov (%rax,%rcx,1),%bnd0",},
189{{0x66, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
190"66 0f 1a 04 c8 \tbndmov (%rax,%rcx,8),%bnd0",},
191{{0x66, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
192"66 0f 1a 40 12 \tbndmov 0x12(%rax),%bnd0",},
193{{0x66, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
194"66 0f 1a 45 12 \tbndmov 0x12(%rbp),%bnd0",},
195{{0x66, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
196"66 0f 1a 44 01 12 \tbndmov 0x12(%rcx,%rax,1),%bnd0",},
197{{0x66, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
198"66 0f 1a 44 05 12 \tbndmov 0x12(%rbp,%rax,1),%bnd0",},
199{{0x66, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
200"66 0f 1a 44 08 12 \tbndmov 0x12(%rax,%rcx,1),%bnd0",},
201{{0x66, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
202"66 0f 1a 44 c8 12 \tbndmov 0x12(%rax,%rcx,8),%bnd0",},
203{{0x66, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
204"66 0f 1a 80 78 56 34 12 \tbndmov 0x12345678(%rax),%bnd0",},
205{{0x66, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
206"66 0f 1a 85 78 56 34 12 \tbndmov 0x12345678(%rbp),%bnd0",},
207{{0x66, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
208"66 0f 1a 84 01 78 56 34 12 \tbndmov 0x12345678(%rcx,%rax,1),%bnd0",},
209{{0x66, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
210"66 0f 1a 84 05 78 56 34 12 \tbndmov 0x12345678(%rbp,%rax,1),%bnd0",},
211{{0x66, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
212"66 0f 1a 84 08 78 56 34 12 \tbndmov 0x12345678(%rax,%rcx,1),%bnd0",},
213{{0x66, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
214"66 0f 1a 84 c8 78 56 34 12 \tbndmov 0x12345678(%rax,%rcx,8),%bnd0",},
215{{0x66, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
216"66 0f 1b 00 \tbndmov %bnd0,(%rax)",},
217{{0x66, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
218"66 41 0f 1b 00 \tbndmov %bnd0,(%r8)",},
219{{0x66, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
220"66 0f 1b 04 25 78 56 34 12 \tbndmov %bnd0,0x12345678",},
221{{0x66, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
222"66 0f 1b 18 \tbndmov %bnd3,(%rax)",},
223{{0x66, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
224"66 0f 1b 04 01 \tbndmov %bnd0,(%rcx,%rax,1)",},
225{{0x66, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
226"66 0f 1b 04 05 78 56 34 12 \tbndmov %bnd0,0x12345678(,%rax,1)",},
227{{0x66, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
228"66 0f 1b 04 08 \tbndmov %bnd0,(%rax,%rcx,1)",},
229{{0x66, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
230"66 0f 1b 04 c8 \tbndmov %bnd0,(%rax,%rcx,8)",},
231{{0x66, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
232"66 0f 1b 40 12 \tbndmov %bnd0,0x12(%rax)",},
233{{0x66, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
234"66 0f 1b 45 12 \tbndmov %bnd0,0x12(%rbp)",},
235{{0x66, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
236"66 0f 1b 44 01 12 \tbndmov %bnd0,0x12(%rcx,%rax,1)",},
237{{0x66, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
238"66 0f 1b 44 05 12 \tbndmov %bnd0,0x12(%rbp,%rax,1)",},
239{{0x66, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
240"66 0f 1b 44 08 12 \tbndmov %bnd0,0x12(%rax,%rcx,1)",},
241{{0x66, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
242"66 0f 1b 44 c8 12 \tbndmov %bnd0,0x12(%rax,%rcx,8)",},
243{{0x66, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
244"66 0f 1b 80 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax)",},
245{{0x66, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
246"66 0f 1b 85 78 56 34 12 \tbndmov %bnd0,0x12345678(%rbp)",},
247{{0x66, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
248"66 0f 1b 84 01 78 56 34 12 \tbndmov %bnd0,0x12345678(%rcx,%rax,1)",},
249{{0x66, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
250"66 0f 1b 84 05 78 56 34 12 \tbndmov %bnd0,0x12345678(%rbp,%rax,1)",},
251{{0x66, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
252"66 0f 1b 84 08 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax,%rcx,1)",},
253{{0x66, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
254"66 0f 1b 84 c8 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax,%rcx,8)",},
255{{0x66, 0x0f, 0x1a, 0xc8, }, 4, 0, "", "",
256"66 0f 1a c8 \tbndmov %bnd0,%bnd1",},
257{{0x66, 0x0f, 0x1a, 0xc1, }, 4, 0, "", "",
258"66 0f 1a c1 \tbndmov %bnd1,%bnd0",},
259{{0x0f, 0x1a, 0x00, }, 3, 0, "", "",
260"0f 1a 00 \tbndldx (%rax),%bnd0",},
261{{0x41, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
262"41 0f 1a 00 \tbndldx (%r8),%bnd0",},
263{{0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
264"0f 1a 04 25 78 56 34 12 \tbndldx 0x12345678,%bnd0",},
265{{0x0f, 0x1a, 0x18, }, 3, 0, "", "",
266"0f 1a 18 \tbndldx (%rax),%bnd3",},
267{{0x0f, 0x1a, 0x04, 0x01, }, 4, 0, "", "",
268"0f 1a 04 01 \tbndldx (%rcx,%rax,1),%bnd0",},
269{{0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
270"0f 1a 04 05 78 56 34 12 \tbndldx 0x12345678(,%rax,1),%bnd0",},
271{{0x0f, 0x1a, 0x04, 0x08, }, 4, 0, "", "",
272"0f 1a 04 08 \tbndldx (%rax,%rcx,1),%bnd0",},
273{{0x0f, 0x1a, 0x40, 0x12, }, 4, 0, "", "",
274"0f 1a 40 12 \tbndldx 0x12(%rax),%bnd0",},
275{{0x0f, 0x1a, 0x45, 0x12, }, 4, 0, "", "",
276"0f 1a 45 12 \tbndldx 0x12(%rbp),%bnd0",},
277{{0x0f, 0x1a, 0x44, 0x01, 0x12, }, 5, 0, "", "",
278"0f 1a 44 01 12 \tbndldx 0x12(%rcx,%rax,1),%bnd0",},
279{{0x0f, 0x1a, 0x44, 0x05, 0x12, }, 5, 0, "", "",
280"0f 1a 44 05 12 \tbndldx 0x12(%rbp,%rax,1),%bnd0",},
281{{0x0f, 0x1a, 0x44, 0x08, 0x12, }, 5, 0, "", "",
282"0f 1a 44 08 12 \tbndldx 0x12(%rax,%rcx,1),%bnd0",},
283{{0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
284"0f 1a 80 78 56 34 12 \tbndldx 0x12345678(%rax),%bnd0",},
285{{0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
286"0f 1a 85 78 56 34 12 \tbndldx 0x12345678(%rbp),%bnd0",},
287{{0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
288"0f 1a 84 01 78 56 34 12 \tbndldx 0x12345678(%rcx,%rax,1),%bnd0",},
289{{0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
290"0f 1a 84 05 78 56 34 12 \tbndldx 0x12345678(%rbp,%rax,1),%bnd0",},
291{{0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
292"0f 1a 84 08 78 56 34 12 \tbndldx 0x12345678(%rax,%rcx,1),%bnd0",},
293{{0x0f, 0x1b, 0x00, }, 3, 0, "", "",
294"0f 1b 00 \tbndstx %bnd0,(%rax)",},
295{{0x41, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
296"41 0f 1b 00 \tbndstx %bnd0,(%r8)",},
297{{0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
298"0f 1b 04 25 78 56 34 12 \tbndstx %bnd0,0x12345678",},
299{{0x0f, 0x1b, 0x18, }, 3, 0, "", "",
300"0f 1b 18 \tbndstx %bnd3,(%rax)",},
301{{0x0f, 0x1b, 0x04, 0x01, }, 4, 0, "", "",
302"0f 1b 04 01 \tbndstx %bnd0,(%rcx,%rax,1)",},
303{{0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
304"0f 1b 04 05 78 56 34 12 \tbndstx %bnd0,0x12345678(,%rax,1)",},
305{{0x0f, 0x1b, 0x04, 0x08, }, 4, 0, "", "",
306"0f 1b 04 08 \tbndstx %bnd0,(%rax,%rcx,1)",},
307{{0x0f, 0x1b, 0x40, 0x12, }, 4, 0, "", "",
308"0f 1b 40 12 \tbndstx %bnd0,0x12(%rax)",},
309{{0x0f, 0x1b, 0x45, 0x12, }, 4, 0, "", "",
310"0f 1b 45 12 \tbndstx %bnd0,0x12(%rbp)",},
311{{0x0f, 0x1b, 0x44, 0x01, 0x12, }, 5, 0, "", "",
312"0f 1b 44 01 12 \tbndstx %bnd0,0x12(%rcx,%rax,1)",},
313{{0x0f, 0x1b, 0x44, 0x05, 0x12, }, 5, 0, "", "",
314"0f 1b 44 05 12 \tbndstx %bnd0,0x12(%rbp,%rax,1)",},
315{{0x0f, 0x1b, 0x44, 0x08, 0x12, }, 5, 0, "", "",
316"0f 1b 44 08 12 \tbndstx %bnd0,0x12(%rax,%rcx,1)",},
317{{0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
318"0f 1b 80 78 56 34 12 \tbndstx %bnd0,0x12345678(%rax)",},
319{{0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
320"0f 1b 85 78 56 34 12 \tbndstx %bnd0,0x12345678(%rbp)",},
321{{0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
322"0f 1b 84 01 78 56 34 12 \tbndstx %bnd0,0x12345678(%rcx,%rax,1)",},
323{{0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
324"0f 1b 84 05 78 56 34 12 \tbndstx %bnd0,0x12345678(%rbp,%rax,1)",},
325{{0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
326"0f 1b 84 08 78 56 34 12 \tbndstx %bnd0,0x12345678(%rax,%rcx,1)",},
327{{0xf2, 0xe8, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "call", "unconditional",
328"f2 e8 00 00 00 00 \tbnd callq 3f6 <main+0x3f6>",},
329{{0x67, 0xf2, 0xff, 0x10, }, 4, 0, "call", "indirect",
330"67 f2 ff 10 \tbnd callq *(%eax)",},
331{{0xf2, 0xc3, }, 2, 0, "ret", "indirect",
332"f2 c3 \tbnd retq ",},
333{{0xf2, 0xe9, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "jmp", "unconditional",
334"f2 e9 00 00 00 00 \tbnd jmpq 402 <main+0x402>",},
335{{0xf2, 0xe9, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "jmp", "unconditional",
336"f2 e9 00 00 00 00 \tbnd jmpq 408 <main+0x408>",},
337{{0x67, 0xf2, 0xff, 0x21, }, 4, 0, "jmp", "indirect",
338"67 f2 ff 21 \tbnd jmpq *(%ecx)",},
339{{0xf2, 0x0f, 0x85, 0x00, 0x00, 0x00, 0x00, }, 7, 0, "jcc", "conditional",
340"f2 0f 85 00 00 00 00 \tbnd jne 413 <main+0x413>",},
341{{0x0f, 0x3a, 0xcc, 0xc1, 0x00, }, 5, 0, "", "",
342"0f 3a cc c1 00 \tsha1rnds4 $0x0,%xmm1,%xmm0",},
343{{0x0f, 0x3a, 0xcc, 0xd7, 0x91, }, 5, 0, "", "",
344"0f 3a cc d7 91 \tsha1rnds4 $0x91,%xmm7,%xmm2",},
345{{0x41, 0x0f, 0x3a, 0xcc, 0xc0, 0x91, }, 6, 0, "", "",
346"41 0f 3a cc c0 91 \tsha1rnds4 $0x91,%xmm8,%xmm0",},
347{{0x44, 0x0f, 0x3a, 0xcc, 0xc7, 0x91, }, 6, 0, "", "",
348"44 0f 3a cc c7 91 \tsha1rnds4 $0x91,%xmm7,%xmm8",},
349{{0x45, 0x0f, 0x3a, 0xcc, 0xc7, 0x91, }, 6, 0, "", "",
350"45 0f 3a cc c7 91 \tsha1rnds4 $0x91,%xmm15,%xmm8",},
351{{0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 5, 0, "", "",
352"0f 3a cc 00 91 \tsha1rnds4 $0x91,(%rax),%xmm0",},
353{{0x41, 0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 6, 0, "", "",
354"41 0f 3a cc 00 91 \tsha1rnds4 $0x91,(%r8),%xmm0",},
355{{0x0f, 0x3a, 0xcc, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
356"0f 3a cc 04 25 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678,%xmm0",},
357{{0x0f, 0x3a, 0xcc, 0x18, 0x91, }, 5, 0, "", "",
358"0f 3a cc 18 91 \tsha1rnds4 $0x91,(%rax),%xmm3",},
359{{0x0f, 0x3a, 0xcc, 0x04, 0x01, 0x91, }, 6, 0, "", "",
360"0f 3a cc 04 01 91 \tsha1rnds4 $0x91,(%rcx,%rax,1),%xmm0",},
361{{0x0f, 0x3a, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
362"0f 3a cc 04 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(,%rax,1),%xmm0",},
363{{0x0f, 0x3a, 0xcc, 0x04, 0x08, 0x91, }, 6, 0, "", "",
364"0f 3a cc 04 08 91 \tsha1rnds4 $0x91,(%rax,%rcx,1),%xmm0",},
365{{0x0f, 0x3a, 0xcc, 0x04, 0xc8, 0x91, }, 6, 0, "", "",
366"0f 3a cc 04 c8 91 \tsha1rnds4 $0x91,(%rax,%rcx,8),%xmm0",},
367{{0x0f, 0x3a, 0xcc, 0x40, 0x12, 0x91, }, 6, 0, "", "",
368"0f 3a cc 40 12 91 \tsha1rnds4 $0x91,0x12(%rax),%xmm0",},
369{{0x0f, 0x3a, 0xcc, 0x45, 0x12, 0x91, }, 6, 0, "", "",
370"0f 3a cc 45 12 91 \tsha1rnds4 $0x91,0x12(%rbp),%xmm0",},
371{{0x0f, 0x3a, 0xcc, 0x44, 0x01, 0x12, 0x91, }, 7, 0, "", "",
372"0f 3a cc 44 01 12 91 \tsha1rnds4 $0x91,0x12(%rcx,%rax,1),%xmm0",},
373{{0x0f, 0x3a, 0xcc, 0x44, 0x05, 0x12, 0x91, }, 7, 0, "", "",
374"0f 3a cc 44 05 12 91 \tsha1rnds4 $0x91,0x12(%rbp,%rax,1),%xmm0",},
375{{0x0f, 0x3a, 0xcc, 0x44, 0x08, 0x12, 0x91, }, 7, 0, "", "",
376"0f 3a cc 44 08 12 91 \tsha1rnds4 $0x91,0x12(%rax,%rcx,1),%xmm0",},
377{{0x0f, 0x3a, 0xcc, 0x44, 0xc8, 0x12, 0x91, }, 7, 0, "", "",
378"0f 3a cc 44 c8 12 91 \tsha1rnds4 $0x91,0x12(%rax,%rcx,8),%xmm0",},
379{{0x0f, 0x3a, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
380"0f 3a cc 80 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax),%xmm0",},
381{{0x0f, 0x3a, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
382"0f 3a cc 85 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rbp),%xmm0",},
383{{0x0f, 0x3a, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
384"0f 3a cc 84 01 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rcx,%rax,1),%xmm0",},
385{{0x0f, 0x3a, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
386"0f 3a cc 84 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rbp,%rax,1),%xmm0",},
387{{0x0f, 0x3a, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
388"0f 3a cc 84 08 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,1),%xmm0",},
389{{0x0f, 0x3a, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
390"0f 3a cc 84 c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,8),%xmm0",},
391{{0x44, 0x0f, 0x3a, 0xcc, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 11, 0, "", "",
392"44 0f 3a cc bc c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,8),%xmm15",},
393{{0x0f, 0x38, 0xc8, 0xc1, }, 4, 0, "", "",
394"0f 38 c8 c1 \tsha1nexte %xmm1,%xmm0",},
395{{0x0f, 0x38, 0xc8, 0xd7, }, 4, 0, "", "",
396"0f 38 c8 d7 \tsha1nexte %xmm7,%xmm2",},
397{{0x41, 0x0f, 0x38, 0xc8, 0xc0, }, 5, 0, "", "",
398"41 0f 38 c8 c0 \tsha1nexte %xmm8,%xmm0",},
399{{0x44, 0x0f, 0x38, 0xc8, 0xc7, }, 5, 0, "", "",
400"44 0f 38 c8 c7 \tsha1nexte %xmm7,%xmm8",},
401{{0x45, 0x0f, 0x38, 0xc8, 0xc7, }, 5, 0, "", "",
402"45 0f 38 c8 c7 \tsha1nexte %xmm15,%xmm8",},
403{{0x0f, 0x38, 0xc8, 0x00, }, 4, 0, "", "",
404"0f 38 c8 00 \tsha1nexte (%rax),%xmm0",},
405{{0x41, 0x0f, 0x38, 0xc8, 0x00, }, 5, 0, "", "",
406"41 0f 38 c8 00 \tsha1nexte (%r8),%xmm0",},
407{{0x0f, 0x38, 0xc8, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
408"0f 38 c8 04 25 78 56 34 12 \tsha1nexte 0x12345678,%xmm0",},
409{{0x0f, 0x38, 0xc8, 0x18, }, 4, 0, "", "",
410"0f 38 c8 18 \tsha1nexte (%rax),%xmm3",},
411{{0x0f, 0x38, 0xc8, 0x04, 0x01, }, 5, 0, "", "",
412"0f 38 c8 04 01 \tsha1nexte (%rcx,%rax,1),%xmm0",},
413{{0x0f, 0x38, 0xc8, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
414"0f 38 c8 04 05 78 56 34 12 \tsha1nexte 0x12345678(,%rax,1),%xmm0",},
415{{0x0f, 0x38, 0xc8, 0x04, 0x08, }, 5, 0, "", "",
416"0f 38 c8 04 08 \tsha1nexte (%rax,%rcx,1),%xmm0",},
417{{0x0f, 0x38, 0xc8, 0x04, 0xc8, }, 5, 0, "", "",
418"0f 38 c8 04 c8 \tsha1nexte (%rax,%rcx,8),%xmm0",},
419{{0x0f, 0x38, 0xc8, 0x40, 0x12, }, 5, 0, "", "",
420"0f 38 c8 40 12 \tsha1nexte 0x12(%rax),%xmm0",},
421{{0x0f, 0x38, 0xc8, 0x45, 0x12, }, 5, 0, "", "",
422"0f 38 c8 45 12 \tsha1nexte 0x12(%rbp),%xmm0",},
423{{0x0f, 0x38, 0xc8, 0x44, 0x01, 0x12, }, 6, 0, "", "",
424"0f 38 c8 44 01 12 \tsha1nexte 0x12(%rcx,%rax,1),%xmm0",},
425{{0x0f, 0x38, 0xc8, 0x44, 0x05, 0x12, }, 6, 0, "", "",
426"0f 38 c8 44 05 12 \tsha1nexte 0x12(%rbp,%rax,1),%xmm0",},
427{{0x0f, 0x38, 0xc8, 0x44, 0x08, 0x12, }, 6, 0, "", "",
428"0f 38 c8 44 08 12 \tsha1nexte 0x12(%rax,%rcx,1),%xmm0",},
429{{0x0f, 0x38, 0xc8, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
430"0f 38 c8 44 c8 12 \tsha1nexte 0x12(%rax,%rcx,8),%xmm0",},
431{{0x0f, 0x38, 0xc8, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
432"0f 38 c8 80 78 56 34 12 \tsha1nexte 0x12345678(%rax),%xmm0",},
433{{0x0f, 0x38, 0xc8, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
434"0f 38 c8 85 78 56 34 12 \tsha1nexte 0x12345678(%rbp),%xmm0",},
435{{0x0f, 0x38, 0xc8, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
436"0f 38 c8 84 01 78 56 34 12 \tsha1nexte 0x12345678(%rcx,%rax,1),%xmm0",},
437{{0x0f, 0x38, 0xc8, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
438"0f 38 c8 84 05 78 56 34 12 \tsha1nexte 0x12345678(%rbp,%rax,1),%xmm0",},
439{{0x0f, 0x38, 0xc8, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
440"0f 38 c8 84 08 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,1),%xmm0",},
441{{0x0f, 0x38, 0xc8, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
442"0f 38 c8 84 c8 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,8),%xmm0",},
443{{0x44, 0x0f, 0x38, 0xc8, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
444"44 0f 38 c8 bc c8 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,8),%xmm15",},
445{{0x0f, 0x38, 0xc9, 0xc1, }, 4, 0, "", "",
446"0f 38 c9 c1 \tsha1msg1 %xmm1,%xmm0",},
447{{0x0f, 0x38, 0xc9, 0xd7, }, 4, 0, "", "",
448"0f 38 c9 d7 \tsha1msg1 %xmm7,%xmm2",},
449{{0x41, 0x0f, 0x38, 0xc9, 0xc0, }, 5, 0, "", "",
450"41 0f 38 c9 c0 \tsha1msg1 %xmm8,%xmm0",},
451{{0x44, 0x0f, 0x38, 0xc9, 0xc7, }, 5, 0, "", "",
452"44 0f 38 c9 c7 \tsha1msg1 %xmm7,%xmm8",},
453{{0x45, 0x0f, 0x38, 0xc9, 0xc7, }, 5, 0, "", "",
454"45 0f 38 c9 c7 \tsha1msg1 %xmm15,%xmm8",},
455{{0x0f, 0x38, 0xc9, 0x00, }, 4, 0, "", "",
456"0f 38 c9 00 \tsha1msg1 (%rax),%xmm0",},
457{{0x41, 0x0f, 0x38, 0xc9, 0x00, }, 5, 0, "", "",
458"41 0f 38 c9 00 \tsha1msg1 (%r8),%xmm0",},
459{{0x0f, 0x38, 0xc9, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
460"0f 38 c9 04 25 78 56 34 12 \tsha1msg1 0x12345678,%xmm0",},
461{{0x0f, 0x38, 0xc9, 0x18, }, 4, 0, "", "",
462"0f 38 c9 18 \tsha1msg1 (%rax),%xmm3",},
463{{0x0f, 0x38, 0xc9, 0x04, 0x01, }, 5, 0, "", "",
464"0f 38 c9 04 01 \tsha1msg1 (%rcx,%rax,1),%xmm0",},
465{{0x0f, 0x38, 0xc9, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
466"0f 38 c9 04 05 78 56 34 12 \tsha1msg1 0x12345678(,%rax,1),%xmm0",},
467{{0x0f, 0x38, 0xc9, 0x04, 0x08, }, 5, 0, "", "",
468"0f 38 c9 04 08 \tsha1msg1 (%rax,%rcx,1),%xmm0",},
469{{0x0f, 0x38, 0xc9, 0x04, 0xc8, }, 5, 0, "", "",
470"0f 38 c9 04 c8 \tsha1msg1 (%rax,%rcx,8),%xmm0",},
471{{0x0f, 0x38, 0xc9, 0x40, 0x12, }, 5, 0, "", "",
472"0f 38 c9 40 12 \tsha1msg1 0x12(%rax),%xmm0",},
473{{0x0f, 0x38, 0xc9, 0x45, 0x12, }, 5, 0, "", "",
474"0f 38 c9 45 12 \tsha1msg1 0x12(%rbp),%xmm0",},
475{{0x0f, 0x38, 0xc9, 0x44, 0x01, 0x12, }, 6, 0, "", "",
476"0f 38 c9 44 01 12 \tsha1msg1 0x12(%rcx,%rax,1),%xmm0",},
477{{0x0f, 0x38, 0xc9, 0x44, 0x05, 0x12, }, 6, 0, "", "",
478"0f 38 c9 44 05 12 \tsha1msg1 0x12(%rbp,%rax,1),%xmm0",},
479{{0x0f, 0x38, 0xc9, 0x44, 0x08, 0x12, }, 6, 0, "", "",
480"0f 38 c9 44 08 12 \tsha1msg1 0x12(%rax,%rcx,1),%xmm0",},
481{{0x0f, 0x38, 0xc9, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
482"0f 38 c9 44 c8 12 \tsha1msg1 0x12(%rax,%rcx,8),%xmm0",},
483{{0x0f, 0x38, 0xc9, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
484"0f 38 c9 80 78 56 34 12 \tsha1msg1 0x12345678(%rax),%xmm0",},
485{{0x0f, 0x38, 0xc9, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
486"0f 38 c9 85 78 56 34 12 \tsha1msg1 0x12345678(%rbp),%xmm0",},
487{{0x0f, 0x38, 0xc9, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
488"0f 38 c9 84 01 78 56 34 12 \tsha1msg1 0x12345678(%rcx,%rax,1),%xmm0",},
489{{0x0f, 0x38, 0xc9, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
490"0f 38 c9 84 05 78 56 34 12 \tsha1msg1 0x12345678(%rbp,%rax,1),%xmm0",},
491{{0x0f, 0x38, 0xc9, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
492"0f 38 c9 84 08 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,1),%xmm0",},
493{{0x0f, 0x38, 0xc9, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
494"0f 38 c9 84 c8 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,8),%xmm0",},
495{{0x44, 0x0f, 0x38, 0xc9, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
496"44 0f 38 c9 bc c8 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,8),%xmm15",},
497{{0x0f, 0x38, 0xca, 0xc1, }, 4, 0, "", "",
498"0f 38 ca c1 \tsha1msg2 %xmm1,%xmm0",},
499{{0x0f, 0x38, 0xca, 0xd7, }, 4, 0, "", "",
500"0f 38 ca d7 \tsha1msg2 %xmm7,%xmm2",},
501{{0x41, 0x0f, 0x38, 0xca, 0xc0, }, 5, 0, "", "",
502"41 0f 38 ca c0 \tsha1msg2 %xmm8,%xmm0",},
503{{0x44, 0x0f, 0x38, 0xca, 0xc7, }, 5, 0, "", "",
504"44 0f 38 ca c7 \tsha1msg2 %xmm7,%xmm8",},
505{{0x45, 0x0f, 0x38, 0xca, 0xc7, }, 5, 0, "", "",
506"45 0f 38 ca c7 \tsha1msg2 %xmm15,%xmm8",},
507{{0x0f, 0x38, 0xca, 0x00, }, 4, 0, "", "",
508"0f 38 ca 00 \tsha1msg2 (%rax),%xmm0",},
509{{0x41, 0x0f, 0x38, 0xca, 0x00, }, 5, 0, "", "",
510"41 0f 38 ca 00 \tsha1msg2 (%r8),%xmm0",},
511{{0x0f, 0x38, 0xca, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
512"0f 38 ca 04 25 78 56 34 12 \tsha1msg2 0x12345678,%xmm0",},
513{{0x0f, 0x38, 0xca, 0x18, }, 4, 0, "", "",
514"0f 38 ca 18 \tsha1msg2 (%rax),%xmm3",},
515{{0x0f, 0x38, 0xca, 0x04, 0x01, }, 5, 0, "", "",
516"0f 38 ca 04 01 \tsha1msg2 (%rcx,%rax,1),%xmm0",},
517{{0x0f, 0x38, 0xca, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
518"0f 38 ca 04 05 78 56 34 12 \tsha1msg2 0x12345678(,%rax,1),%xmm0",},
519{{0x0f, 0x38, 0xca, 0x04, 0x08, }, 5, 0, "", "",
520"0f 38 ca 04 08 \tsha1msg2 (%rax,%rcx,1),%xmm0",},
521{{0x0f, 0x38, 0xca, 0x04, 0xc8, }, 5, 0, "", "",
522"0f 38 ca 04 c8 \tsha1msg2 (%rax,%rcx,8),%xmm0",},
523{{0x0f, 0x38, 0xca, 0x40, 0x12, }, 5, 0, "", "",
524"0f 38 ca 40 12 \tsha1msg2 0x12(%rax),%xmm0",},
525{{0x0f, 0x38, 0xca, 0x45, 0x12, }, 5, 0, "", "",
526"0f 38 ca 45 12 \tsha1msg2 0x12(%rbp),%xmm0",},
527{{0x0f, 0x38, 0xca, 0x44, 0x01, 0x12, }, 6, 0, "", "",
528"0f 38 ca 44 01 12 \tsha1msg2 0x12(%rcx,%rax,1),%xmm0",},
529{{0x0f, 0x38, 0xca, 0x44, 0x05, 0x12, }, 6, 0, "", "",
530"0f 38 ca 44 05 12 \tsha1msg2 0x12(%rbp,%rax,1),%xmm0",},
531{{0x0f, 0x38, 0xca, 0x44, 0x08, 0x12, }, 6, 0, "", "",
532"0f 38 ca 44 08 12 \tsha1msg2 0x12(%rax,%rcx,1),%xmm0",},
533{{0x0f, 0x38, 0xca, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
534"0f 38 ca 44 c8 12 \tsha1msg2 0x12(%rax,%rcx,8),%xmm0",},
535{{0x0f, 0x38, 0xca, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
536"0f 38 ca 80 78 56 34 12 \tsha1msg2 0x12345678(%rax),%xmm0",},
537{{0x0f, 0x38, 0xca, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
538"0f 38 ca 85 78 56 34 12 \tsha1msg2 0x12345678(%rbp),%xmm0",},
539{{0x0f, 0x38, 0xca, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
540"0f 38 ca 84 01 78 56 34 12 \tsha1msg2 0x12345678(%rcx,%rax,1),%xmm0",},
541{{0x0f, 0x38, 0xca, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
542"0f 38 ca 84 05 78 56 34 12 \tsha1msg2 0x12345678(%rbp,%rax,1),%xmm0",},
543{{0x0f, 0x38, 0xca, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
544"0f 38 ca 84 08 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,1),%xmm0",},
545{{0x0f, 0x38, 0xca, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
546"0f 38 ca 84 c8 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,8),%xmm0",},
547{{0x44, 0x0f, 0x38, 0xca, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
548"44 0f 38 ca bc c8 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,8),%xmm15",},
549{{0x0f, 0x38, 0xcb, 0xcc, }, 4, 0, "", "",
550"0f 38 cb cc \tsha256rnds2 %xmm0,%xmm4,%xmm1",},
551{{0x0f, 0x38, 0xcb, 0xd7, }, 4, 0, "", "",
552"0f 38 cb d7 \tsha256rnds2 %xmm0,%xmm7,%xmm2",},
553{{0x41, 0x0f, 0x38, 0xcb, 0xc8, }, 5, 0, "", "",
554"41 0f 38 cb c8 \tsha256rnds2 %xmm0,%xmm8,%xmm1",},
555{{0x44, 0x0f, 0x38, 0xcb, 0xc7, }, 5, 0, "", "",
556"44 0f 38 cb c7 \tsha256rnds2 %xmm0,%xmm7,%xmm8",},
557{{0x45, 0x0f, 0x38, 0xcb, 0xc7, }, 5, 0, "", "",
558"45 0f 38 cb c7 \tsha256rnds2 %xmm0,%xmm15,%xmm8",},
559{{0x0f, 0x38, 0xcb, 0x08, }, 4, 0, "", "",
560"0f 38 cb 08 \tsha256rnds2 %xmm0,(%rax),%xmm1",},
561{{0x41, 0x0f, 0x38, 0xcb, 0x08, }, 5, 0, "", "",
562"41 0f 38 cb 08 \tsha256rnds2 %xmm0,(%r8),%xmm1",},
563{{0x0f, 0x38, 0xcb, 0x0c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
564"0f 38 cb 0c 25 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678,%xmm1",},
565{{0x0f, 0x38, 0xcb, 0x18, }, 4, 0, "", "",
566"0f 38 cb 18 \tsha256rnds2 %xmm0,(%rax),%xmm3",},
567{{0x0f, 0x38, 0xcb, 0x0c, 0x01, }, 5, 0, "", "",
568"0f 38 cb 0c 01 \tsha256rnds2 %xmm0,(%rcx,%rax,1),%xmm1",},
569{{0x0f, 0x38, 0xcb, 0x0c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
570"0f 38 cb 0c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(,%rax,1),%xmm1",},
571{{0x0f, 0x38, 0xcb, 0x0c, 0x08, }, 5, 0, "", "",
572"0f 38 cb 0c 08 \tsha256rnds2 %xmm0,(%rax,%rcx,1),%xmm1",},
573{{0x0f, 0x38, 0xcb, 0x0c, 0xc8, }, 5, 0, "", "",
574"0f 38 cb 0c c8 \tsha256rnds2 %xmm0,(%rax,%rcx,8),%xmm1",},
575{{0x0f, 0x38, 0xcb, 0x48, 0x12, }, 5, 0, "", "",
576"0f 38 cb 48 12 \tsha256rnds2 %xmm0,0x12(%rax),%xmm1",},
577{{0x0f, 0x38, 0xcb, 0x4d, 0x12, }, 5, 0, "", "",
578"0f 38 cb 4d 12 \tsha256rnds2 %xmm0,0x12(%rbp),%xmm1",},
579{{0x0f, 0x38, 0xcb, 0x4c, 0x01, 0x12, }, 6, 0, "", "",
580"0f 38 cb 4c 01 12 \tsha256rnds2 %xmm0,0x12(%rcx,%rax,1),%xmm1",},
581{{0x0f, 0x38, 0xcb, 0x4c, 0x05, 0x12, }, 6, 0, "", "",
582"0f 38 cb 4c 05 12 \tsha256rnds2 %xmm0,0x12(%rbp,%rax,1),%xmm1",},
583{{0x0f, 0x38, 0xcb, 0x4c, 0x08, 0x12, }, 6, 0, "", "",
584"0f 38 cb 4c 08 12 \tsha256rnds2 %xmm0,0x12(%rax,%rcx,1),%xmm1",},
585{{0x0f, 0x38, 0xcb, 0x4c, 0xc8, 0x12, }, 6, 0, "", "",
586"0f 38 cb 4c c8 12 \tsha256rnds2 %xmm0,0x12(%rax,%rcx,8),%xmm1",},
587{{0x0f, 0x38, 0xcb, 0x88, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
588"0f 38 cb 88 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax),%xmm1",},
589{{0x0f, 0x38, 0xcb, 0x8d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
590"0f 38 cb 8d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rbp),%xmm1",},
591{{0x0f, 0x38, 0xcb, 0x8c, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
592"0f 38 cb 8c 01 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rcx,%rax,1),%xmm1",},
593{{0x0f, 0x38, 0xcb, 0x8c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
594"0f 38 cb 8c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rbp,%rax,1),%xmm1",},
595{{0x0f, 0x38, 0xcb, 0x8c, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
596"0f 38 cb 8c 08 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,1),%xmm1",},
597{{0x0f, 0x38, 0xcb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
598"0f 38 cb 8c c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,8),%xmm1",},
599{{0x44, 0x0f, 0x38, 0xcb, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
600"44 0f 38 cb bc c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,8),%xmm15",},
601{{0x0f, 0x38, 0xcc, 0xc1, }, 4, 0, "", "",
602"0f 38 cc c1 \tsha256msg1 %xmm1,%xmm0",},
603{{0x0f, 0x38, 0xcc, 0xd7, }, 4, 0, "", "",
604"0f 38 cc d7 \tsha256msg1 %xmm7,%xmm2",},
605{{0x41, 0x0f, 0x38, 0xcc, 0xc0, }, 5, 0, "", "",
606"41 0f 38 cc c0 \tsha256msg1 %xmm8,%xmm0",},
607{{0x44, 0x0f, 0x38, 0xcc, 0xc7, }, 5, 0, "", "",
608"44 0f 38 cc c7 \tsha256msg1 %xmm7,%xmm8",},
609{{0x45, 0x0f, 0x38, 0xcc, 0xc7, }, 5, 0, "", "",
610"45 0f 38 cc c7 \tsha256msg1 %xmm15,%xmm8",},
611{{0x0f, 0x38, 0xcc, 0x00, }, 4, 0, "", "",
612"0f 38 cc 00 \tsha256msg1 (%rax),%xmm0",},
613{{0x41, 0x0f, 0x38, 0xcc, 0x00, }, 5, 0, "", "",
614"41 0f 38 cc 00 \tsha256msg1 (%r8),%xmm0",},
615{{0x0f, 0x38, 0xcc, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
616"0f 38 cc 04 25 78 56 34 12 \tsha256msg1 0x12345678,%xmm0",},
617{{0x0f, 0x38, 0xcc, 0x18, }, 4, 0, "", "",
618"0f 38 cc 18 \tsha256msg1 (%rax),%xmm3",},
619{{0x0f, 0x38, 0xcc, 0x04, 0x01, }, 5, 0, "", "",
620"0f 38 cc 04 01 \tsha256msg1 (%rcx,%rax,1),%xmm0",},
621{{0x0f, 0x38, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
622"0f 38 cc 04 05 78 56 34 12 \tsha256msg1 0x12345678(,%rax,1),%xmm0",},
623{{0x0f, 0x38, 0xcc, 0x04, 0x08, }, 5, 0, "", "",
624"0f 38 cc 04 08 \tsha256msg1 (%rax,%rcx,1),%xmm0",},
625{{0x0f, 0x38, 0xcc, 0x04, 0xc8, }, 5, 0, "", "",
626"0f 38 cc 04 c8 \tsha256msg1 (%rax,%rcx,8),%xmm0",},
627{{0x0f, 0x38, 0xcc, 0x40, 0x12, }, 5, 0, "", "",
628"0f 38 cc 40 12 \tsha256msg1 0x12(%rax),%xmm0",},
629{{0x0f, 0x38, 0xcc, 0x45, 0x12, }, 5, 0, "", "",
630"0f 38 cc 45 12 \tsha256msg1 0x12(%rbp),%xmm0",},
631{{0x0f, 0x38, 0xcc, 0x44, 0x01, 0x12, }, 6, 0, "", "",
632"0f 38 cc 44 01 12 \tsha256msg1 0x12(%rcx,%rax,1),%xmm0",},
633{{0x0f, 0x38, 0xcc, 0x44, 0x05, 0x12, }, 6, 0, "", "",
634"0f 38 cc 44 05 12 \tsha256msg1 0x12(%rbp,%rax,1),%xmm0",},
635{{0x0f, 0x38, 0xcc, 0x44, 0x08, 0x12, }, 6, 0, "", "",
636"0f 38 cc 44 08 12 \tsha256msg1 0x12(%rax,%rcx,1),%xmm0",},
637{{0x0f, 0x38, 0xcc, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
638"0f 38 cc 44 c8 12 \tsha256msg1 0x12(%rax,%rcx,8),%xmm0",},
639{{0x0f, 0x38, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
640"0f 38 cc 80 78 56 34 12 \tsha256msg1 0x12345678(%rax),%xmm0",},
641{{0x0f, 0x38, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
642"0f 38 cc 85 78 56 34 12 \tsha256msg1 0x12345678(%rbp),%xmm0",},
643{{0x0f, 0x38, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
644"0f 38 cc 84 01 78 56 34 12 \tsha256msg1 0x12345678(%rcx,%rax,1),%xmm0",},
645{{0x0f, 0x38, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
646"0f 38 cc 84 05 78 56 34 12 \tsha256msg1 0x12345678(%rbp,%rax,1),%xmm0",},
647{{0x0f, 0x38, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
648"0f 38 cc 84 08 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,1),%xmm0",},
649{{0x0f, 0x38, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
650"0f 38 cc 84 c8 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,8),%xmm0",},
651{{0x44, 0x0f, 0x38, 0xcc, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
652"44 0f 38 cc bc c8 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,8),%xmm15",},
653{{0x0f, 0x38, 0xcd, 0xc1, }, 4, 0, "", "",
654"0f 38 cd c1 \tsha256msg2 %xmm1,%xmm0",},
655{{0x0f, 0x38, 0xcd, 0xd7, }, 4, 0, "", "",
656"0f 38 cd d7 \tsha256msg2 %xmm7,%xmm2",},
657{{0x41, 0x0f, 0x38, 0xcd, 0xc0, }, 5, 0, "", "",
658"41 0f 38 cd c0 \tsha256msg2 %xmm8,%xmm0",},
659{{0x44, 0x0f, 0x38, 0xcd, 0xc7, }, 5, 0, "", "",
660"44 0f 38 cd c7 \tsha256msg2 %xmm7,%xmm8",},
661{{0x45, 0x0f, 0x38, 0xcd, 0xc7, }, 5, 0, "", "",
662"45 0f 38 cd c7 \tsha256msg2 %xmm15,%xmm8",},
663{{0x0f, 0x38, 0xcd, 0x00, }, 4, 0, "", "",
664"0f 38 cd 00 \tsha256msg2 (%rax),%xmm0",},
665{{0x41, 0x0f, 0x38, 0xcd, 0x00, }, 5, 0, "", "",
666"41 0f 38 cd 00 \tsha256msg2 (%r8),%xmm0",},
667{{0x0f, 0x38, 0xcd, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
668"0f 38 cd 04 25 78 56 34 12 \tsha256msg2 0x12345678,%xmm0",},
669{{0x0f, 0x38, 0xcd, 0x18, }, 4, 0, "", "",
670"0f 38 cd 18 \tsha256msg2 (%rax),%xmm3",},
671{{0x0f, 0x38, 0xcd, 0x04, 0x01, }, 5, 0, "", "",
672"0f 38 cd 04 01 \tsha256msg2 (%rcx,%rax,1),%xmm0",},
673{{0x0f, 0x38, 0xcd, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
674"0f 38 cd 04 05 78 56 34 12 \tsha256msg2 0x12345678(,%rax,1),%xmm0",},
675{{0x0f, 0x38, 0xcd, 0x04, 0x08, }, 5, 0, "", "",
676"0f 38 cd 04 08 \tsha256msg2 (%rax,%rcx,1),%xmm0",},
677{{0x0f, 0x38, 0xcd, 0x04, 0xc8, }, 5, 0, "", "",
678"0f 38 cd 04 c8 \tsha256msg2 (%rax,%rcx,8),%xmm0",},
679{{0x0f, 0x38, 0xcd, 0x40, 0x12, }, 5, 0, "", "",
680"0f 38 cd 40 12 \tsha256msg2 0x12(%rax),%xmm0",},
681{{0x0f, 0x38, 0xcd, 0x45, 0x12, }, 5, 0, "", "",
682"0f 38 cd 45 12 \tsha256msg2 0x12(%rbp),%xmm0",},
683{{0x0f, 0x38, 0xcd, 0x44, 0x01, 0x12, }, 6, 0, "", "",
684"0f 38 cd 44 01 12 \tsha256msg2 0x12(%rcx,%rax,1),%xmm0",},
685{{0x0f, 0x38, 0xcd, 0x44, 0x05, 0x12, }, 6, 0, "", "",
686"0f 38 cd 44 05 12 \tsha256msg2 0x12(%rbp,%rax,1),%xmm0",},
687{{0x0f, 0x38, 0xcd, 0x44, 0x08, 0x12, }, 6, 0, "", "",
688"0f 38 cd 44 08 12 \tsha256msg2 0x12(%rax,%rcx,1),%xmm0",},
689{{0x0f, 0x38, 0xcd, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
690"0f 38 cd 44 c8 12 \tsha256msg2 0x12(%rax,%rcx,8),%xmm0",},
691{{0x0f, 0x38, 0xcd, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
692"0f 38 cd 80 78 56 34 12 \tsha256msg2 0x12345678(%rax),%xmm0",},
693{{0x0f, 0x38, 0xcd, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
694"0f 38 cd 85 78 56 34 12 \tsha256msg2 0x12345678(%rbp),%xmm0",},
695{{0x0f, 0x38, 0xcd, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
696"0f 38 cd 84 01 78 56 34 12 \tsha256msg2 0x12345678(%rcx,%rax,1),%xmm0",},
697{{0x0f, 0x38, 0xcd, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
698"0f 38 cd 84 05 78 56 34 12 \tsha256msg2 0x12345678(%rbp,%rax,1),%xmm0",},
699{{0x0f, 0x38, 0xcd, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
700"0f 38 cd 84 08 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,1),%xmm0",},
701{{0x0f, 0x38, 0xcd, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
702"0f 38 cd 84 c8 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,8),%xmm0",},
703{{0x44, 0x0f, 0x38, 0xcd, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
704"44 0f 38 cd bc c8 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,8),%xmm15",},
705{{0x66, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
706"66 0f ae 38 \tclflushopt (%rax)",},
707{{0x66, 0x41, 0x0f, 0xae, 0x38, }, 5, 0, "", "",
708"66 41 0f ae 38 \tclflushopt (%r8)",},
709{{0x66, 0x0f, 0xae, 0x3c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
710"66 0f ae 3c 25 78 56 34 12 \tclflushopt 0x12345678",},
711{{0x66, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
712"66 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%rax,%rcx,8)",},
713{{0x66, 0x41, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
714"66 41 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%r8,%rcx,8)",},
715{{0x0f, 0xae, 0x38, }, 3, 0, "", "",
716"0f ae 38 \tclflush (%rax)",},
717{{0x41, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
718"41 0f ae 38 \tclflush (%r8)",},
719{{0x0f, 0xae, 0xf8, }, 3, 0, "", "",
720"0f ae f8 \tsfence ",},
721{{0x66, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
722"66 0f ae 30 \tclwb (%rax)",},
723{{0x66, 0x41, 0x0f, 0xae, 0x30, }, 5, 0, "", "",
724"66 41 0f ae 30 \tclwb (%r8)",},
725{{0x66, 0x0f, 0xae, 0x34, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
726"66 0f ae 34 25 78 56 34 12 \tclwb 0x12345678",},
727{{0x66, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
728"66 0f ae b4 c8 78 56 34 12 \tclwb 0x12345678(%rax,%rcx,8)",},
729{{0x66, 0x41, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
730"66 41 0f ae b4 c8 78 56 34 12 \tclwb 0x12345678(%r8,%rcx,8)",},
731{{0x0f, 0xae, 0x30, }, 3, 0, "", "",
732"0f ae 30 \txsaveopt (%rax)",},
733{{0x41, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
734"41 0f ae 30 \txsaveopt (%r8)",},
735{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
736"0f ae f0 \tmfence ",},
737{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
738"0f c7 20 \txsavec (%rax)",},
739{{0x41, 0x0f, 0xc7, 0x20, }, 4, 0, "", "",
740"41 0f c7 20 \txsavec (%r8)",},
741{{0x0f, 0xc7, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
742"0f c7 24 25 78 56 34 12 \txsavec 0x12345678",},
743{{0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
744"0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%rax,%rcx,8)",},
745{{0x41, 0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
746"41 0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%r8,%rcx,8)",},
747{{0x0f, 0xc7, 0x28, }, 3, 0, "", "",
748"0f c7 28 \txsaves (%rax)",},
749{{0x41, 0x0f, 0xc7, 0x28, }, 4, 0, "", "",
750"41 0f c7 28 \txsaves (%r8)",},
751{{0x0f, 0xc7, 0x2c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
752"0f c7 2c 25 78 56 34 12 \txsaves 0x12345678",},
753{{0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
754"0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%rax,%rcx,8)",},
755{{0x41, 0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
756"41 0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%r8,%rcx,8)",},
757{{0x0f, 0xc7, 0x18, }, 3, 0, "", "",
758"0f c7 18 \txrstors (%rax)",},
759{{0x41, 0x0f, 0xc7, 0x18, }, 4, 0, "", "",
760"41 0f c7 18 \txrstors (%r8)",},
761{{0x0f, 0xc7, 0x1c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
762"0f c7 1c 25 78 56 34 12 \txrstors 0x12345678",},
763{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
764"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%rax,%rcx,8)",},
765{{0x41, 0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
766"41 0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%r8,%rcx,8)",},
767{{0x66, 0x0f, 0xae, 0xf8, }, 4, 0, "", "",
768"66 0f ae f8 \tpcommit ",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
new file mode 100644
index 000000000000..41b1b1c62660
--- /dev/null
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -0,0 +1,877 @@
1/*
2 * This file contains instructions for testing by the test titled:
3 *
4 * "Test x86 instruction decoder - new instructions"
5 *
6 * Note that the 'Expecting' comment lines are consumed by the
7 * gen-insn-x86-dat.awk script and have the format:
8 *
9 * Expecting: <op> <branch> <rel>
10 *
11 * If this file is changed, remember to run the gen-insn-x86-dat.sh
12 * script and commit the result.
13 *
14 * Refer to insn-x86.c for more details.
15 */
16
17int main(void)
18{
19 /* Following line is a marker for the awk script - do not change */
20 asm volatile("rdtsc"); /* Start here */
21
22#ifdef __x86_64__
23
24 /* bndmk m64, bnd */
25
26 asm volatile("bndmk (%rax), %bnd0");
27 asm volatile("bndmk (%r8), %bnd0");
28 asm volatile("bndmk (0x12345678), %bnd0");
29 asm volatile("bndmk (%rax), %bnd3");
30 asm volatile("bndmk (%rcx,%rax,1), %bnd0");
31 asm volatile("bndmk 0x12345678(,%rax,1), %bnd0");
32 asm volatile("bndmk (%rax,%rcx,1), %bnd0");
33 asm volatile("bndmk (%rax,%rcx,8), %bnd0");
34 asm volatile("bndmk 0x12(%rax), %bnd0");
35 asm volatile("bndmk 0x12(%rbp), %bnd0");
36 asm volatile("bndmk 0x12(%rcx,%rax,1), %bnd0");
37 asm volatile("bndmk 0x12(%rbp,%rax,1), %bnd0");
38 asm volatile("bndmk 0x12(%rax,%rcx,1), %bnd0");
39 asm volatile("bndmk 0x12(%rax,%rcx,8), %bnd0");
40 asm volatile("bndmk 0x12345678(%rax), %bnd0");
41 asm volatile("bndmk 0x12345678(%rbp), %bnd0");
42 asm volatile("bndmk 0x12345678(%rcx,%rax,1), %bnd0");
43 asm volatile("bndmk 0x12345678(%rbp,%rax,1), %bnd0");
44 asm volatile("bndmk 0x12345678(%rax,%rcx,1), %bnd0");
45 asm volatile("bndmk 0x12345678(%rax,%rcx,8), %bnd0");
46
47 /* bndcl r/m64, bnd */
48
49 asm volatile("bndcl (%rax), %bnd0");
50 asm volatile("bndcl (%r8), %bnd0");
51 asm volatile("bndcl (0x12345678), %bnd0");
52 asm volatile("bndcl (%rax), %bnd3");
53 asm volatile("bndcl (%rcx,%rax,1), %bnd0");
54 asm volatile("bndcl 0x12345678(,%rax,1), %bnd0");
55 asm volatile("bndcl (%rax,%rcx,1), %bnd0");
56 asm volatile("bndcl (%rax,%rcx,8), %bnd0");
57 asm volatile("bndcl 0x12(%rax), %bnd0");
58 asm volatile("bndcl 0x12(%rbp), %bnd0");
59 asm volatile("bndcl 0x12(%rcx,%rax,1), %bnd0");
60 asm volatile("bndcl 0x12(%rbp,%rax,1), %bnd0");
61 asm volatile("bndcl 0x12(%rax,%rcx,1), %bnd0");
62 asm volatile("bndcl 0x12(%rax,%rcx,8), %bnd0");
63 asm volatile("bndcl 0x12345678(%rax), %bnd0");
64 asm volatile("bndcl 0x12345678(%rbp), %bnd0");
65 asm volatile("bndcl 0x12345678(%rcx,%rax,1), %bnd0");
66 asm volatile("bndcl 0x12345678(%rbp,%rax,1), %bnd0");
67 asm volatile("bndcl 0x12345678(%rax,%rcx,1), %bnd0");
68 asm volatile("bndcl 0x12345678(%rax,%rcx,8), %bnd0");
69 asm volatile("bndcl %rax, %bnd0");
70
71 /* bndcu r/m64, bnd */
72
73 asm volatile("bndcu (%rax), %bnd0");
74 asm volatile("bndcu (%r8), %bnd0");
75 asm volatile("bndcu (0x12345678), %bnd0");
76 asm volatile("bndcu (%rax), %bnd3");
77 asm volatile("bndcu (%rcx,%rax,1), %bnd0");
78 asm volatile("bndcu 0x12345678(,%rax,1), %bnd0");
79 asm volatile("bndcu (%rax,%rcx,1), %bnd0");
80 asm volatile("bndcu (%rax,%rcx,8), %bnd0");
81 asm volatile("bndcu 0x12(%rax), %bnd0");
82 asm volatile("bndcu 0x12(%rbp), %bnd0");
83 asm volatile("bndcu 0x12(%rcx,%rax,1), %bnd0");
84 asm volatile("bndcu 0x12(%rbp,%rax,1), %bnd0");
85 asm volatile("bndcu 0x12(%rax,%rcx,1), %bnd0");
86 asm volatile("bndcu 0x12(%rax,%rcx,8), %bnd0");
87 asm volatile("bndcu 0x12345678(%rax), %bnd0");
88 asm volatile("bndcu 0x12345678(%rbp), %bnd0");
89 asm volatile("bndcu 0x12345678(%rcx,%rax,1), %bnd0");
90 asm volatile("bndcu 0x12345678(%rbp,%rax,1), %bnd0");
91 asm volatile("bndcu 0x12345678(%rax,%rcx,1), %bnd0");
92 asm volatile("bndcu 0x12345678(%rax,%rcx,8), %bnd0");
93 asm volatile("bndcu %rax, %bnd0");
94
95 /* bndcn r/m64, bnd */
96
97 asm volatile("bndcn (%rax), %bnd0");
98 asm volatile("bndcn (%r8), %bnd0");
99 asm volatile("bndcn (0x12345678), %bnd0");
100 asm volatile("bndcn (%rax), %bnd3");
101 asm volatile("bndcn (%rcx,%rax,1), %bnd0");
102 asm volatile("bndcn 0x12345678(,%rax,1), %bnd0");
103 asm volatile("bndcn (%rax,%rcx,1), %bnd0");
104 asm volatile("bndcn (%rax,%rcx,8), %bnd0");
105 asm volatile("bndcn 0x12(%rax), %bnd0");
106 asm volatile("bndcn 0x12(%rbp), %bnd0");
107 asm volatile("bndcn 0x12(%rcx,%rax,1), %bnd0");
108 asm volatile("bndcn 0x12(%rbp,%rax,1), %bnd0");
109 asm volatile("bndcn 0x12(%rax,%rcx,1), %bnd0");
110 asm volatile("bndcn 0x12(%rax,%rcx,8), %bnd0");
111 asm volatile("bndcn 0x12345678(%rax), %bnd0");
112 asm volatile("bndcn 0x12345678(%rbp), %bnd0");
113 asm volatile("bndcn 0x12345678(%rcx,%rax,1), %bnd0");
114 asm volatile("bndcn 0x12345678(%rbp,%rax,1), %bnd0");
115 asm volatile("bndcn 0x12345678(%rax,%rcx,1), %bnd0");
116 asm volatile("bndcn 0x12345678(%rax,%rcx,8), %bnd0");
117 asm volatile("bndcn %rax, %bnd0");
118
119 /* bndmov m128, bnd */
120
121 asm volatile("bndmov (%rax), %bnd0");
122 asm volatile("bndmov (%r8), %bnd0");
123 asm volatile("bndmov (0x12345678), %bnd0");
124 asm volatile("bndmov (%rax), %bnd3");
125 asm volatile("bndmov (%rcx,%rax,1), %bnd0");
126 asm volatile("bndmov 0x12345678(,%rax,1), %bnd0");
127 asm volatile("bndmov (%rax,%rcx,1), %bnd0");
128 asm volatile("bndmov (%rax,%rcx,8), %bnd0");
129 asm volatile("bndmov 0x12(%rax), %bnd0");
130 asm volatile("bndmov 0x12(%rbp), %bnd0");
131 asm volatile("bndmov 0x12(%rcx,%rax,1), %bnd0");
132 asm volatile("bndmov 0x12(%rbp,%rax,1), %bnd0");
133 asm volatile("bndmov 0x12(%rax,%rcx,1), %bnd0");
134 asm volatile("bndmov 0x12(%rax,%rcx,8), %bnd0");
135 asm volatile("bndmov 0x12345678(%rax), %bnd0");
136 asm volatile("bndmov 0x12345678(%rbp), %bnd0");
137 asm volatile("bndmov 0x12345678(%rcx,%rax,1), %bnd0");
138 asm volatile("bndmov 0x12345678(%rbp,%rax,1), %bnd0");
139 asm volatile("bndmov 0x12345678(%rax,%rcx,1), %bnd0");
140 asm volatile("bndmov 0x12345678(%rax,%rcx,8), %bnd0");
141
142 /* bndmov bnd, m128 */
143
144 asm volatile("bndmov %bnd0, (%rax)");
145 asm volatile("bndmov %bnd0, (%r8)");
146 asm volatile("bndmov %bnd0, (0x12345678)");
147 asm volatile("bndmov %bnd3, (%rax)");
148 asm volatile("bndmov %bnd0, (%rcx,%rax,1)");
149 asm volatile("bndmov %bnd0, 0x12345678(,%rax,1)");
150 asm volatile("bndmov %bnd0, (%rax,%rcx,1)");
151 asm volatile("bndmov %bnd0, (%rax,%rcx,8)");
152 asm volatile("bndmov %bnd0, 0x12(%rax)");
153 asm volatile("bndmov %bnd0, 0x12(%rbp)");
154 asm volatile("bndmov %bnd0, 0x12(%rcx,%rax,1)");
155 asm volatile("bndmov %bnd0, 0x12(%rbp,%rax,1)");
156 asm volatile("bndmov %bnd0, 0x12(%rax,%rcx,1)");
157 asm volatile("bndmov %bnd0, 0x12(%rax,%rcx,8)");
158 asm volatile("bndmov %bnd0, 0x12345678(%rax)");
159 asm volatile("bndmov %bnd0, 0x12345678(%rbp)");
160 asm volatile("bndmov %bnd0, 0x12345678(%rcx,%rax,1)");
161 asm volatile("bndmov %bnd0, 0x12345678(%rbp,%rax,1)");
162 asm volatile("bndmov %bnd0, 0x12345678(%rax,%rcx,1)");
163 asm volatile("bndmov %bnd0, 0x12345678(%rax,%rcx,8)");
164
165 /* bndmov bnd2, bnd1 */
166
167 asm volatile("bndmov %bnd0, %bnd1");
168 asm volatile("bndmov %bnd1, %bnd0");
169
170 /* bndldx mib, bnd */
171
172 asm volatile("bndldx (%rax), %bnd0");
173 asm volatile("bndldx (%r8), %bnd0");
174 asm volatile("bndldx (0x12345678), %bnd0");
175 asm volatile("bndldx (%rax), %bnd3");
176 asm volatile("bndldx (%rcx,%rax,1), %bnd0");
177 asm volatile("bndldx 0x12345678(,%rax,1), %bnd0");
178 asm volatile("bndldx (%rax,%rcx,1), %bnd0");
179 asm volatile("bndldx 0x12(%rax), %bnd0");
180 asm volatile("bndldx 0x12(%rbp), %bnd0");
181 asm volatile("bndldx 0x12(%rcx,%rax,1), %bnd0");
182 asm volatile("bndldx 0x12(%rbp,%rax,1), %bnd0");
183 asm volatile("bndldx 0x12(%rax,%rcx,1), %bnd0");
184 asm volatile("bndldx 0x12345678(%rax), %bnd0");
185 asm volatile("bndldx 0x12345678(%rbp), %bnd0");
186 asm volatile("bndldx 0x12345678(%rcx,%rax,1), %bnd0");
187 asm volatile("bndldx 0x12345678(%rbp,%rax,1), %bnd0");
188 asm volatile("bndldx 0x12345678(%rax,%rcx,1), %bnd0");
189
190 /* bndstx bnd, mib */
191
192 asm volatile("bndstx %bnd0, (%rax)");
193 asm volatile("bndstx %bnd0, (%r8)");
194 asm volatile("bndstx %bnd0, (0x12345678)");
195 asm volatile("bndstx %bnd3, (%rax)");
196 asm volatile("bndstx %bnd0, (%rcx,%rax,1)");
197 asm volatile("bndstx %bnd0, 0x12345678(,%rax,1)");
198 asm volatile("bndstx %bnd0, (%rax,%rcx,1)");
199 asm volatile("bndstx %bnd0, 0x12(%rax)");
200 asm volatile("bndstx %bnd0, 0x12(%rbp)");
201 asm volatile("bndstx %bnd0, 0x12(%rcx,%rax,1)");
202 asm volatile("bndstx %bnd0, 0x12(%rbp,%rax,1)");
203 asm volatile("bndstx %bnd0, 0x12(%rax,%rcx,1)");
204 asm volatile("bndstx %bnd0, 0x12345678(%rax)");
205 asm volatile("bndstx %bnd0, 0x12345678(%rbp)");
206 asm volatile("bndstx %bnd0, 0x12345678(%rcx,%rax,1)");
207 asm volatile("bndstx %bnd0, 0x12345678(%rbp,%rax,1)");
208 asm volatile("bndstx %bnd0, 0x12345678(%rax,%rcx,1)");
209
210 /* bnd prefix on call, ret, jmp and all jcc */
211
212 asm volatile("bnd call label1"); /* Expecting: call unconditional 0 */
213 asm volatile("bnd call *(%eax)"); /* Expecting: call indirect 0 */
214 asm volatile("bnd ret"); /* Expecting: ret indirect 0 */
215 asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0 */
216 asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0 */
217 asm volatile("bnd jmp *(%ecx)"); /* Expecting: jmp indirect 0 */
218 asm volatile("bnd jne label1"); /* Expecting: jcc conditional 0 */
219
220 /* sha1rnds4 imm8, xmm2/m128, xmm1 */
221
222 asm volatile("sha1rnds4 $0x0, %xmm1, %xmm0");
223 asm volatile("sha1rnds4 $0x91, %xmm7, %xmm2");
224 asm volatile("sha1rnds4 $0x91, %xmm8, %xmm0");
225 asm volatile("sha1rnds4 $0x91, %xmm7, %xmm8");
226 asm volatile("sha1rnds4 $0x91, %xmm15, %xmm8");
227 asm volatile("sha1rnds4 $0x91, (%rax), %xmm0");
228 asm volatile("sha1rnds4 $0x91, (%r8), %xmm0");
229 asm volatile("sha1rnds4 $0x91, (0x12345678), %xmm0");
230 asm volatile("sha1rnds4 $0x91, (%rax), %xmm3");
231 asm volatile("sha1rnds4 $0x91, (%rcx,%rax,1), %xmm0");
232 asm volatile("sha1rnds4 $0x91, 0x12345678(,%rax,1), %xmm0");
233 asm volatile("sha1rnds4 $0x91, (%rax,%rcx,1), %xmm0");
234 asm volatile("sha1rnds4 $0x91, (%rax,%rcx,8), %xmm0");
235 asm volatile("sha1rnds4 $0x91, 0x12(%rax), %xmm0");
236 asm volatile("sha1rnds4 $0x91, 0x12(%rbp), %xmm0");
237 asm volatile("sha1rnds4 $0x91, 0x12(%rcx,%rax,1), %xmm0");
238 asm volatile("sha1rnds4 $0x91, 0x12(%rbp,%rax,1), %xmm0");
239 asm volatile("sha1rnds4 $0x91, 0x12(%rax,%rcx,1), %xmm0");
240 asm volatile("sha1rnds4 $0x91, 0x12(%rax,%rcx,8), %xmm0");
241 asm volatile("sha1rnds4 $0x91, 0x12345678(%rax), %xmm0");
242 asm volatile("sha1rnds4 $0x91, 0x12345678(%rbp), %xmm0");
243 asm volatile("sha1rnds4 $0x91, 0x12345678(%rcx,%rax,1), %xmm0");
244 asm volatile("sha1rnds4 $0x91, 0x12345678(%rbp,%rax,1), %xmm0");
245 asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,1), %xmm0");
246 asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,8), %xmm0");
247 asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,8), %xmm15");
248
249 /* sha1nexte xmm2/m128, xmm1 */
250
251 asm volatile("sha1nexte %xmm1, %xmm0");
252 asm volatile("sha1nexte %xmm7, %xmm2");
253 asm volatile("sha1nexte %xmm8, %xmm0");
254 asm volatile("sha1nexte %xmm7, %xmm8");
255 asm volatile("sha1nexte %xmm15, %xmm8");
256 asm volatile("sha1nexte (%rax), %xmm0");
257 asm volatile("sha1nexte (%r8), %xmm0");
258 asm volatile("sha1nexte (0x12345678), %xmm0");
259 asm volatile("sha1nexte (%rax), %xmm3");
260 asm volatile("sha1nexte (%rcx,%rax,1), %xmm0");
261 asm volatile("sha1nexte 0x12345678(,%rax,1), %xmm0");
262 asm volatile("sha1nexte (%rax,%rcx,1), %xmm0");
263 asm volatile("sha1nexte (%rax,%rcx,8), %xmm0");
264 asm volatile("sha1nexte 0x12(%rax), %xmm0");
265 asm volatile("sha1nexte 0x12(%rbp), %xmm0");
266 asm volatile("sha1nexte 0x12(%rcx,%rax,1), %xmm0");
267 asm volatile("sha1nexte 0x12(%rbp,%rax,1), %xmm0");
268 asm volatile("sha1nexte 0x12(%rax,%rcx,1), %xmm0");
269 asm volatile("sha1nexte 0x12(%rax,%rcx,8), %xmm0");
270 asm volatile("sha1nexte 0x12345678(%rax), %xmm0");
271 asm volatile("sha1nexte 0x12345678(%rbp), %xmm0");
272 asm volatile("sha1nexte 0x12345678(%rcx,%rax,1), %xmm0");
273 asm volatile("sha1nexte 0x12345678(%rbp,%rax,1), %xmm0");
274 asm volatile("sha1nexte 0x12345678(%rax,%rcx,1), %xmm0");
275 asm volatile("sha1nexte 0x12345678(%rax,%rcx,8), %xmm0");
276 asm volatile("sha1nexte 0x12345678(%rax,%rcx,8), %xmm15");
277
278 /* sha1msg1 xmm2/m128, xmm1 */
279
280 asm volatile("sha1msg1 %xmm1, %xmm0");
281 asm volatile("sha1msg1 %xmm7, %xmm2");
282 asm volatile("sha1msg1 %xmm8, %xmm0");
283 asm volatile("sha1msg1 %xmm7, %xmm8");
284 asm volatile("sha1msg1 %xmm15, %xmm8");
285 asm volatile("sha1msg1 (%rax), %xmm0");
286 asm volatile("sha1msg1 (%r8), %xmm0");
287 asm volatile("sha1msg1 (0x12345678), %xmm0");
288 asm volatile("sha1msg1 (%rax), %xmm3");
289 asm volatile("sha1msg1 (%rcx,%rax,1), %xmm0");
290 asm volatile("sha1msg1 0x12345678(,%rax,1), %xmm0");
291 asm volatile("sha1msg1 (%rax,%rcx,1), %xmm0");
292 asm volatile("sha1msg1 (%rax,%rcx,8), %xmm0");
293 asm volatile("sha1msg1 0x12(%rax), %xmm0");
294 asm volatile("sha1msg1 0x12(%rbp), %xmm0");
295 asm volatile("sha1msg1 0x12(%rcx,%rax,1), %xmm0");
296 asm volatile("sha1msg1 0x12(%rbp,%rax,1), %xmm0");
297 asm volatile("sha1msg1 0x12(%rax,%rcx,1), %xmm0");
298 asm volatile("sha1msg1 0x12(%rax,%rcx,8), %xmm0");
299 asm volatile("sha1msg1 0x12345678(%rax), %xmm0");
300 asm volatile("sha1msg1 0x12345678(%rbp), %xmm0");
301 asm volatile("sha1msg1 0x12345678(%rcx,%rax,1), %xmm0");
302 asm volatile("sha1msg1 0x12345678(%rbp,%rax,1), %xmm0");
303 asm volatile("sha1msg1 0x12345678(%rax,%rcx,1), %xmm0");
304 asm volatile("sha1msg1 0x12345678(%rax,%rcx,8), %xmm0");
305 asm volatile("sha1msg1 0x12345678(%rax,%rcx,8), %xmm15");
306
307 /* sha1msg2 xmm2/m128, xmm1 */
308
309 asm volatile("sha1msg2 %xmm1, %xmm0");
310 asm volatile("sha1msg2 %xmm7, %xmm2");
311 asm volatile("sha1msg2 %xmm8, %xmm0");
312 asm volatile("sha1msg2 %xmm7, %xmm8");
313 asm volatile("sha1msg2 %xmm15, %xmm8");
314 asm volatile("sha1msg2 (%rax), %xmm0");
315 asm volatile("sha1msg2 (%r8), %xmm0");
316 asm volatile("sha1msg2 (0x12345678), %xmm0");
317 asm volatile("sha1msg2 (%rax), %xmm3");
318 asm volatile("sha1msg2 (%rcx,%rax,1), %xmm0");
319 asm volatile("sha1msg2 0x12345678(,%rax,1), %xmm0");
320 asm volatile("sha1msg2 (%rax,%rcx,1), %xmm0");
321 asm volatile("sha1msg2 (%rax,%rcx,8), %xmm0");
322 asm volatile("sha1msg2 0x12(%rax), %xmm0");
323 asm volatile("sha1msg2 0x12(%rbp), %xmm0");
324 asm volatile("sha1msg2 0x12(%rcx,%rax,1), %xmm0");
325 asm volatile("sha1msg2 0x12(%rbp,%rax,1), %xmm0");
326 asm volatile("sha1msg2 0x12(%rax,%rcx,1), %xmm0");
327 asm volatile("sha1msg2 0x12(%rax,%rcx,8), %xmm0");
328 asm volatile("sha1msg2 0x12345678(%rax), %xmm0");
329 asm volatile("sha1msg2 0x12345678(%rbp), %xmm0");
330 asm volatile("sha1msg2 0x12345678(%rcx,%rax,1), %xmm0");
331 asm volatile("sha1msg2 0x12345678(%rbp,%rax,1), %xmm0");
332 asm volatile("sha1msg2 0x12345678(%rax,%rcx,1), %xmm0");
333 asm volatile("sha1msg2 0x12345678(%rax,%rcx,8), %xmm0");
334 asm volatile("sha1msg2 0x12345678(%rax,%rcx,8), %xmm15");
335
336 /* sha256rnds2 <XMM0>, xmm2/m128, xmm1 */
337 /* Note sha256rnds2 has an implicit operand 'xmm0' */
338
339 asm volatile("sha256rnds2 %xmm4, %xmm1");
340 asm volatile("sha256rnds2 %xmm7, %xmm2");
341 asm volatile("sha256rnds2 %xmm8, %xmm1");
342 asm volatile("sha256rnds2 %xmm7, %xmm8");
343 asm volatile("sha256rnds2 %xmm15, %xmm8");
344 asm volatile("sha256rnds2 (%rax), %xmm1");
345 asm volatile("sha256rnds2 (%r8), %xmm1");
346 asm volatile("sha256rnds2 (0x12345678), %xmm1");
347 asm volatile("sha256rnds2 (%rax), %xmm3");
348 asm volatile("sha256rnds2 (%rcx,%rax,1), %xmm1");
349 asm volatile("sha256rnds2 0x12345678(,%rax,1), %xmm1");
350 asm volatile("sha256rnds2 (%rax,%rcx,1), %xmm1");
351 asm volatile("sha256rnds2 (%rax,%rcx,8), %xmm1");
352 asm volatile("sha256rnds2 0x12(%rax), %xmm1");
353 asm volatile("sha256rnds2 0x12(%rbp), %xmm1");
354 asm volatile("sha256rnds2 0x12(%rcx,%rax,1), %xmm1");
355 asm volatile("sha256rnds2 0x12(%rbp,%rax,1), %xmm1");
356 asm volatile("sha256rnds2 0x12(%rax,%rcx,1), %xmm1");
357 asm volatile("sha256rnds2 0x12(%rax,%rcx,8), %xmm1");
358 asm volatile("sha256rnds2 0x12345678(%rax), %xmm1");
359 asm volatile("sha256rnds2 0x12345678(%rbp), %xmm1");
360 asm volatile("sha256rnds2 0x12345678(%rcx,%rax,1), %xmm1");
361 asm volatile("sha256rnds2 0x12345678(%rbp,%rax,1), %xmm1");
362 asm volatile("sha256rnds2 0x12345678(%rax,%rcx,1), %xmm1");
363 asm volatile("sha256rnds2 0x12345678(%rax,%rcx,8), %xmm1");
364 asm volatile("sha256rnds2 0x12345678(%rax,%rcx,8), %xmm15");
365
366 /* sha256msg1 xmm2/m128, xmm1 */
367
368 asm volatile("sha256msg1 %xmm1, %xmm0");
369 asm volatile("sha256msg1 %xmm7, %xmm2");
370 asm volatile("sha256msg1 %xmm8, %xmm0");
371 asm volatile("sha256msg1 %xmm7, %xmm8");
372 asm volatile("sha256msg1 %xmm15, %xmm8");
373 asm volatile("sha256msg1 (%rax), %xmm0");
374 asm volatile("sha256msg1 (%r8), %xmm0");
375 asm volatile("sha256msg1 (0x12345678), %xmm0");
376 asm volatile("sha256msg1 (%rax), %xmm3");
377 asm volatile("sha256msg1 (%rcx,%rax,1), %xmm0");
378 asm volatile("sha256msg1 0x12345678(,%rax,1), %xmm0");
379 asm volatile("sha256msg1 (%rax,%rcx,1), %xmm0");
380 asm volatile("sha256msg1 (%rax,%rcx,8), %xmm0");
381 asm volatile("sha256msg1 0x12(%rax), %xmm0");
382 asm volatile("sha256msg1 0x12(%rbp), %xmm0");
383 asm volatile("sha256msg1 0x12(%rcx,%rax,1), %xmm0");
384 asm volatile("sha256msg1 0x12(%rbp,%rax,1), %xmm0");
385 asm volatile("sha256msg1 0x12(%rax,%rcx,1), %xmm0");
386 asm volatile("sha256msg1 0x12(%rax,%rcx,8), %xmm0");
387 asm volatile("sha256msg1 0x12345678(%rax), %xmm0");
388 asm volatile("sha256msg1 0x12345678(%rbp), %xmm0");
389 asm volatile("sha256msg1 0x12345678(%rcx,%rax,1), %xmm0");
390 asm volatile("sha256msg1 0x12345678(%rbp,%rax,1), %xmm0");
391 asm volatile("sha256msg1 0x12345678(%rax,%rcx,1), %xmm0");
392 asm volatile("sha256msg1 0x12345678(%rax,%rcx,8), %xmm0");
393 asm volatile("sha256msg1 0x12345678(%rax,%rcx,8), %xmm15");
394
395 /* sha256msg2 xmm2/m128, xmm1 */
396
397 asm volatile("sha256msg2 %xmm1, %xmm0");
398 asm volatile("sha256msg2 %xmm7, %xmm2");
399 asm volatile("sha256msg2 %xmm8, %xmm0");
400 asm volatile("sha256msg2 %xmm7, %xmm8");
401 asm volatile("sha256msg2 %xmm15, %xmm8");
402 asm volatile("sha256msg2 (%rax), %xmm0");
403 asm volatile("sha256msg2 (%r8), %xmm0");
404 asm volatile("sha256msg2 (0x12345678), %xmm0");
405 asm volatile("sha256msg2 (%rax), %xmm3");
406 asm volatile("sha256msg2 (%rcx,%rax,1), %xmm0");
407 asm volatile("sha256msg2 0x12345678(,%rax,1), %xmm0");
408 asm volatile("sha256msg2 (%rax,%rcx,1), %xmm0");
409 asm volatile("sha256msg2 (%rax,%rcx,8), %xmm0");
410 asm volatile("sha256msg2 0x12(%rax), %xmm0");
411 asm volatile("sha256msg2 0x12(%rbp), %xmm0");
412 asm volatile("sha256msg2 0x12(%rcx,%rax,1), %xmm0");
413 asm volatile("sha256msg2 0x12(%rbp,%rax,1), %xmm0");
414 asm volatile("sha256msg2 0x12(%rax,%rcx,1), %xmm0");
415 asm volatile("sha256msg2 0x12(%rax,%rcx,8), %xmm0");
416 asm volatile("sha256msg2 0x12345678(%rax), %xmm0");
417 asm volatile("sha256msg2 0x12345678(%rbp), %xmm0");
418 asm volatile("sha256msg2 0x12345678(%rcx,%rax,1), %xmm0");
419 asm volatile("sha256msg2 0x12345678(%rbp,%rax,1), %xmm0");
420 asm volatile("sha256msg2 0x12345678(%rax,%rcx,1), %xmm0");
421 asm volatile("sha256msg2 0x12345678(%rax,%rcx,8), %xmm0");
422 asm volatile("sha256msg2 0x12345678(%rax,%rcx,8), %xmm15");
423
424 /* clflushopt m8 */
425
426 asm volatile("clflushopt (%rax)");
427 asm volatile("clflushopt (%r8)");
428 asm volatile("clflushopt (0x12345678)");
429 asm volatile("clflushopt 0x12345678(%rax,%rcx,8)");
430 asm volatile("clflushopt 0x12345678(%r8,%rcx,8)");
431 /* Also check instructions in the same group encoding as clflushopt */
432 asm volatile("clflush (%rax)");
433 asm volatile("clflush (%r8)");
434 asm volatile("sfence");
435
436 /* clwb m8 */
437
438 asm volatile("clwb (%rax)");
439 asm volatile("clwb (%r8)");
440 asm volatile("clwb (0x12345678)");
441 asm volatile("clwb 0x12345678(%rax,%rcx,8)");
442 asm volatile("clwb 0x12345678(%r8,%rcx,8)");
443 /* Also check instructions in the same group encoding as clwb */
444 asm volatile("xsaveopt (%rax)");
445 asm volatile("xsaveopt (%r8)");
446 asm volatile("mfence");
447
448 /* xsavec mem */
449
450 asm volatile("xsavec (%rax)");
451 asm volatile("xsavec (%r8)");
452 asm volatile("xsavec (0x12345678)");
453 asm volatile("xsavec 0x12345678(%rax,%rcx,8)");
454 asm volatile("xsavec 0x12345678(%r8,%rcx,8)");
455
456 /* xsaves mem */
457
458 asm volatile("xsaves (%rax)");
459 asm volatile("xsaves (%r8)");
460 asm volatile("xsaves (0x12345678)");
461 asm volatile("xsaves 0x12345678(%rax,%rcx,8)");
462 asm volatile("xsaves 0x12345678(%r8,%rcx,8)");
463
464 /* xrstors mem */
465
466 asm volatile("xrstors (%rax)");
467 asm volatile("xrstors (%r8)");
468 asm volatile("xrstors (0x12345678)");
469 asm volatile("xrstors 0x12345678(%rax,%rcx,8)");
470 asm volatile("xrstors 0x12345678(%r8,%rcx,8)");
471
472#else /* #ifdef __x86_64__ */
473
474 /* bndmk m32, bnd */
475
476 asm volatile("bndmk (%eax), %bnd0");
477 asm volatile("bndmk (0x12345678), %bnd0");
478 asm volatile("bndmk (%eax), %bnd3");
479 asm volatile("bndmk (%ecx,%eax,1), %bnd0");
480 asm volatile("bndmk 0x12345678(,%eax,1), %bnd0");
481 asm volatile("bndmk (%eax,%ecx,1), %bnd0");
482 asm volatile("bndmk (%eax,%ecx,8), %bnd0");
483 asm volatile("bndmk 0x12(%eax), %bnd0");
484 asm volatile("bndmk 0x12(%ebp), %bnd0");
485 asm volatile("bndmk 0x12(%ecx,%eax,1), %bnd0");
486 asm volatile("bndmk 0x12(%ebp,%eax,1), %bnd0");
487 asm volatile("bndmk 0x12(%eax,%ecx,1), %bnd0");
488 asm volatile("bndmk 0x12(%eax,%ecx,8), %bnd0");
489 asm volatile("bndmk 0x12345678(%eax), %bnd0");
490 asm volatile("bndmk 0x12345678(%ebp), %bnd0");
491 asm volatile("bndmk 0x12345678(%ecx,%eax,1), %bnd0");
492 asm volatile("bndmk 0x12345678(%ebp,%eax,1), %bnd0");
493 asm volatile("bndmk 0x12345678(%eax,%ecx,1), %bnd0");
494 asm volatile("bndmk 0x12345678(%eax,%ecx,8), %bnd0");
495
496 /* bndcl r/m32, bnd */
497
498 asm volatile("bndcl (%eax), %bnd0");
499 asm volatile("bndcl (0x12345678), %bnd0");
500 asm volatile("bndcl (%eax), %bnd3");
501 asm volatile("bndcl (%ecx,%eax,1), %bnd0");
502 asm volatile("bndcl 0x12345678(,%eax,1), %bnd0");
503 asm volatile("bndcl (%eax,%ecx,1), %bnd0");
504 asm volatile("bndcl (%eax,%ecx,8), %bnd0");
505 asm volatile("bndcl 0x12(%eax), %bnd0");
506 asm volatile("bndcl 0x12(%ebp), %bnd0");
507 asm volatile("bndcl 0x12(%ecx,%eax,1), %bnd0");
508 asm volatile("bndcl 0x12(%ebp,%eax,1), %bnd0");
509 asm volatile("bndcl 0x12(%eax,%ecx,1), %bnd0");
510 asm volatile("bndcl 0x12(%eax,%ecx,8), %bnd0");
511 asm volatile("bndcl 0x12345678(%eax), %bnd0");
512 asm volatile("bndcl 0x12345678(%ebp), %bnd0");
513 asm volatile("bndcl 0x12345678(%ecx,%eax,1), %bnd0");
514 asm volatile("bndcl 0x12345678(%ebp,%eax,1), %bnd0");
515 asm volatile("bndcl 0x12345678(%eax,%ecx,1), %bnd0");
516 asm volatile("bndcl 0x12345678(%eax,%ecx,8), %bnd0");
517 asm volatile("bndcl %eax, %bnd0");
518
519 /* bndcu r/m32, bnd */
520
521 asm volatile("bndcu (%eax), %bnd0");
522 asm volatile("bndcu (0x12345678), %bnd0");
523 asm volatile("bndcu (%eax), %bnd3");
524 asm volatile("bndcu (%ecx,%eax,1), %bnd0");
525 asm volatile("bndcu 0x12345678(,%eax,1), %bnd0");
526 asm volatile("bndcu (%eax,%ecx,1), %bnd0");
527 asm volatile("bndcu (%eax,%ecx,8), %bnd0");
528 asm volatile("bndcu 0x12(%eax), %bnd0");
529 asm volatile("bndcu 0x12(%ebp), %bnd0");
530 asm volatile("bndcu 0x12(%ecx,%eax,1), %bnd0");
531 asm volatile("bndcu 0x12(%ebp,%eax,1), %bnd0");
532 asm volatile("bndcu 0x12(%eax,%ecx,1), %bnd0");
533 asm volatile("bndcu 0x12(%eax,%ecx,8), %bnd0");
534 asm volatile("bndcu 0x12345678(%eax), %bnd0");
535 asm volatile("bndcu 0x12345678(%ebp), %bnd0");
536 asm volatile("bndcu 0x12345678(%ecx,%eax,1), %bnd0");
537 asm volatile("bndcu 0x12345678(%ebp,%eax,1), %bnd0");
538 asm volatile("bndcu 0x12345678(%eax,%ecx,1), %bnd0");
539 asm volatile("bndcu 0x12345678(%eax,%ecx,8), %bnd0");
540 asm volatile("bndcu %eax, %bnd0");
541
542 /* bndcn r/m32, bnd */
543
544 asm volatile("bndcn (%eax), %bnd0");
545 asm volatile("bndcn (0x12345678), %bnd0");
546 asm volatile("bndcn (%eax), %bnd3");
547 asm volatile("bndcn (%ecx,%eax,1), %bnd0");
548 asm volatile("bndcn 0x12345678(,%eax,1), %bnd0");
549 asm volatile("bndcn (%eax,%ecx,1), %bnd0");
550 asm volatile("bndcn (%eax,%ecx,8), %bnd0");
551 asm volatile("bndcn 0x12(%eax), %bnd0");
552 asm volatile("bndcn 0x12(%ebp), %bnd0");
553 asm volatile("bndcn 0x12(%ecx,%eax,1), %bnd0");
554 asm volatile("bndcn 0x12(%ebp,%eax,1), %bnd0");
555 asm volatile("bndcn 0x12(%eax,%ecx,1), %bnd0");
556 asm volatile("bndcn 0x12(%eax,%ecx,8), %bnd0");
557 asm volatile("bndcn 0x12345678(%eax), %bnd0");
558 asm volatile("bndcn 0x12345678(%ebp), %bnd0");
559 asm volatile("bndcn 0x12345678(%ecx,%eax,1), %bnd0");
560 asm volatile("bndcn 0x12345678(%ebp,%eax,1), %bnd0");
561 asm volatile("bndcn 0x12345678(%eax,%ecx,1), %bnd0");
562 asm volatile("bndcn 0x12345678(%eax,%ecx,8), %bnd0");
563 asm volatile("bndcn %eax, %bnd0");
564
565 /* bndmov m64, bnd */
566
567 asm volatile("bndmov (%eax), %bnd0");
568 asm volatile("bndmov (0x12345678), %bnd0");
569 asm volatile("bndmov (%eax), %bnd3");
570 asm volatile("bndmov (%ecx,%eax,1), %bnd0");
571 asm volatile("bndmov 0x12345678(,%eax,1), %bnd0");
572 asm volatile("bndmov (%eax,%ecx,1), %bnd0");
573 asm volatile("bndmov (%eax,%ecx,8), %bnd0");
574 asm volatile("bndmov 0x12(%eax), %bnd0");
575 asm volatile("bndmov 0x12(%ebp), %bnd0");
576 asm volatile("bndmov 0x12(%ecx,%eax,1), %bnd0");
577 asm volatile("bndmov 0x12(%ebp,%eax,1), %bnd0");
578 asm volatile("bndmov 0x12(%eax,%ecx,1), %bnd0");
579 asm volatile("bndmov 0x12(%eax,%ecx,8), %bnd0");
580 asm volatile("bndmov 0x12345678(%eax), %bnd0");
581 asm volatile("bndmov 0x12345678(%ebp), %bnd0");
582 asm volatile("bndmov 0x12345678(%ecx,%eax,1), %bnd0");
583 asm volatile("bndmov 0x12345678(%ebp,%eax,1), %bnd0");
584 asm volatile("bndmov 0x12345678(%eax,%ecx,1), %bnd0");
585 asm volatile("bndmov 0x12345678(%eax,%ecx,8), %bnd0");
586
587 /* bndmov bnd, m64 */
588
589 asm volatile("bndmov %bnd0, (%eax)");
590 asm volatile("bndmov %bnd0, (0x12345678)");
591 asm volatile("bndmov %bnd3, (%eax)");
592 asm volatile("bndmov %bnd0, (%ecx,%eax,1)");
593 asm volatile("bndmov %bnd0, 0x12345678(,%eax,1)");
594 asm volatile("bndmov %bnd0, (%eax,%ecx,1)");
595 asm volatile("bndmov %bnd0, (%eax,%ecx,8)");
596 asm volatile("bndmov %bnd0, 0x12(%eax)");
597 asm volatile("bndmov %bnd0, 0x12(%ebp)");
598 asm volatile("bndmov %bnd0, 0x12(%ecx,%eax,1)");
599 asm volatile("bndmov %bnd0, 0x12(%ebp,%eax,1)");
600 asm volatile("bndmov %bnd0, 0x12(%eax,%ecx,1)");
601 asm volatile("bndmov %bnd0, 0x12(%eax,%ecx,8)");
602 asm volatile("bndmov %bnd0, 0x12345678(%eax)");
603 asm volatile("bndmov %bnd0, 0x12345678(%ebp)");
604 asm volatile("bndmov %bnd0, 0x12345678(%ecx,%eax,1)");
605 asm volatile("bndmov %bnd0, 0x12345678(%ebp,%eax,1)");
606 asm volatile("bndmov %bnd0, 0x12345678(%eax,%ecx,1)");
607 asm volatile("bndmov %bnd0, 0x12345678(%eax,%ecx,8)");
608
609 /* bndmov bnd2, bnd1 */
610
611 asm volatile("bndmov %bnd0, %bnd1");
612 asm volatile("bndmov %bnd1, %bnd0");
613
614 /* bndldx mib, bnd */
615
616 asm volatile("bndldx (%eax), %bnd0");
617 asm volatile("bndldx (0x12345678), %bnd0");
618 asm volatile("bndldx (%eax), %bnd3");
619 asm volatile("bndldx (%ecx,%eax,1), %bnd0");
620 asm volatile("bndldx 0x12345678(,%eax,1), %bnd0");
621 asm volatile("bndldx (%eax,%ecx,1), %bnd0");
622 asm volatile("bndldx 0x12(%eax), %bnd0");
623 asm volatile("bndldx 0x12(%ebp), %bnd0");
624 asm volatile("bndldx 0x12(%ecx,%eax,1), %bnd0");
625 asm volatile("bndldx 0x12(%ebp,%eax,1), %bnd0");
626 asm volatile("bndldx 0x12(%eax,%ecx,1), %bnd0");
627 asm volatile("bndldx 0x12345678(%eax), %bnd0");
628 asm volatile("bndldx 0x12345678(%ebp), %bnd0");
629 asm volatile("bndldx 0x12345678(%ecx,%eax,1), %bnd0");
630 asm volatile("bndldx 0x12345678(%ebp,%eax,1), %bnd0");
631 asm volatile("bndldx 0x12345678(%eax,%ecx,1), %bnd0");
632
633 /* bndstx bnd, mib */
634
635 asm volatile("bndstx %bnd0, (%eax)");
636 asm volatile("bndstx %bnd0, (0x12345678)");
637 asm volatile("bndstx %bnd3, (%eax)");
638 asm volatile("bndstx %bnd0, (%ecx,%eax,1)");
639 asm volatile("bndstx %bnd0, 0x12345678(,%eax,1)");
640 asm volatile("bndstx %bnd0, (%eax,%ecx,1)");
641 asm volatile("bndstx %bnd0, 0x12(%eax)");
642 asm volatile("bndstx %bnd0, 0x12(%ebp)");
643 asm volatile("bndstx %bnd0, 0x12(%ecx,%eax,1)");
644 asm volatile("bndstx %bnd0, 0x12(%ebp,%eax,1)");
645 asm volatile("bndstx %bnd0, 0x12(%eax,%ecx,1)");
646 asm volatile("bndstx %bnd0, 0x12345678(%eax)");
647 asm volatile("bndstx %bnd0, 0x12345678(%ebp)");
648 asm volatile("bndstx %bnd0, 0x12345678(%ecx,%eax,1)");
649 asm volatile("bndstx %bnd0, 0x12345678(%ebp,%eax,1)");
650 asm volatile("bndstx %bnd0, 0x12345678(%eax,%ecx,1)");
651
652 /* bnd prefix on call, ret, jmp and all jcc */
653
654 asm volatile("bnd call label1"); /* Expecting: call unconditional 0xfffffffc */
655 asm volatile("bnd call *(%eax)"); /* Expecting: call indirect 0 */
656 asm volatile("bnd ret"); /* Expecting: ret indirect 0 */
657 asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0xfffffffc */
658 asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0xfffffffc */
659 asm volatile("bnd jmp *(%ecx)"); /* Expecting: jmp indirect 0 */
660 asm volatile("bnd jne label1"); /* Expecting: jcc conditional 0xfffffffc */
661
662 /* sha1rnds4 imm8, xmm2/m128, xmm1 */
663
664 asm volatile("sha1rnds4 $0x0, %xmm1, %xmm0");
665 asm volatile("sha1rnds4 $0x91, %xmm7, %xmm2");
666 asm volatile("sha1rnds4 $0x91, (%eax), %xmm0");
667 asm volatile("sha1rnds4 $0x91, (0x12345678), %xmm0");
668 asm volatile("sha1rnds4 $0x91, (%eax), %xmm3");
669 asm volatile("sha1rnds4 $0x91, (%ecx,%eax,1), %xmm0");
670 asm volatile("sha1rnds4 $0x91, 0x12345678(,%eax,1), %xmm0");
671 asm volatile("sha1rnds4 $0x91, (%eax,%ecx,1), %xmm0");
672 asm volatile("sha1rnds4 $0x91, (%eax,%ecx,8), %xmm0");
673 asm volatile("sha1rnds4 $0x91, 0x12(%eax), %xmm0");
674 asm volatile("sha1rnds4 $0x91, 0x12(%ebp), %xmm0");
675 asm volatile("sha1rnds4 $0x91, 0x12(%ecx,%eax,1), %xmm0");
676 asm volatile("sha1rnds4 $0x91, 0x12(%ebp,%eax,1), %xmm0");
677 asm volatile("sha1rnds4 $0x91, 0x12(%eax,%ecx,1), %xmm0");
678 asm volatile("sha1rnds4 $0x91, 0x12(%eax,%ecx,8), %xmm0");
679 asm volatile("sha1rnds4 $0x91, 0x12345678(%eax), %xmm0");
680 asm volatile("sha1rnds4 $0x91, 0x12345678(%ebp), %xmm0");
681 asm volatile("sha1rnds4 $0x91, 0x12345678(%ecx,%eax,1), %xmm0");
682 asm volatile("sha1rnds4 $0x91, 0x12345678(%ebp,%eax,1), %xmm0");
683 asm volatile("sha1rnds4 $0x91, 0x12345678(%eax,%ecx,1), %xmm0");
684 asm volatile("sha1rnds4 $0x91, 0x12345678(%eax,%ecx,8), %xmm0");
685
686 /* sha1nexte xmm2/m128, xmm1 */
687
688 asm volatile("sha1nexte %xmm1, %xmm0");
689 asm volatile("sha1nexte %xmm7, %xmm2");
690 asm volatile("sha1nexte (%eax), %xmm0");
691 asm volatile("sha1nexte (0x12345678), %xmm0");
692 asm volatile("sha1nexte (%eax), %xmm3");
693 asm volatile("sha1nexte (%ecx,%eax,1), %xmm0");
694 asm volatile("sha1nexte 0x12345678(,%eax,1), %xmm0");
695 asm volatile("sha1nexte (%eax,%ecx,1), %xmm0");
696 asm volatile("sha1nexte (%eax,%ecx,8), %xmm0");
697 asm volatile("sha1nexte 0x12(%eax), %xmm0");
698 asm volatile("sha1nexte 0x12(%ebp), %xmm0");
699 asm volatile("sha1nexte 0x12(%ecx,%eax,1), %xmm0");
700 asm volatile("sha1nexte 0x12(%ebp,%eax,1), %xmm0");
701 asm volatile("sha1nexte 0x12(%eax,%ecx,1), %xmm0");
702 asm volatile("sha1nexte 0x12(%eax,%ecx,8), %xmm0");
703 asm volatile("sha1nexte 0x12345678(%eax), %xmm0");
704 asm volatile("sha1nexte 0x12345678(%ebp), %xmm0");
705 asm volatile("sha1nexte 0x12345678(%ecx,%eax,1), %xmm0");
706 asm volatile("sha1nexte 0x12345678(%ebp,%eax,1), %xmm0");
707 asm volatile("sha1nexte 0x12345678(%eax,%ecx,1), %xmm0");
708 asm volatile("sha1nexte 0x12345678(%eax,%ecx,8), %xmm0");
709
710 /* sha1msg1 xmm2/m128, xmm1 */
711
712 asm volatile("sha1msg1 %xmm1, %xmm0");
713 asm volatile("sha1msg1 %xmm7, %xmm2");
714 asm volatile("sha1msg1 (%eax), %xmm0");
715 asm volatile("sha1msg1 (0x12345678), %xmm0");
716 asm volatile("sha1msg1 (%eax), %xmm3");
717 asm volatile("sha1msg1 (%ecx,%eax,1), %xmm0");
718 asm volatile("sha1msg1 0x12345678(,%eax,1), %xmm0");
719 asm volatile("sha1msg1 (%eax,%ecx,1), %xmm0");
720 asm volatile("sha1msg1 (%eax,%ecx,8), %xmm0");
721 asm volatile("sha1msg1 0x12(%eax), %xmm0");
722 asm volatile("sha1msg1 0x12(%ebp), %xmm0");
723 asm volatile("sha1msg1 0x12(%ecx,%eax,1), %xmm0");
724 asm volatile("sha1msg1 0x12(%ebp,%eax,1), %xmm0");
725 asm volatile("sha1msg1 0x12(%eax,%ecx,1), %xmm0");
726 asm volatile("sha1msg1 0x12(%eax,%ecx,8), %xmm0");
727 asm volatile("sha1msg1 0x12345678(%eax), %xmm0");
728 asm volatile("sha1msg1 0x12345678(%ebp), %xmm0");
729 asm volatile("sha1msg1 0x12345678(%ecx,%eax,1), %xmm0");
730 asm volatile("sha1msg1 0x12345678(%ebp,%eax,1), %xmm0");
731 asm volatile("sha1msg1 0x12345678(%eax,%ecx,1), %xmm0");
732 asm volatile("sha1msg1 0x12345678(%eax,%ecx,8), %xmm0");
733
734 /* sha1msg2 xmm2/m128, xmm1 */
735
736 asm volatile("sha1msg2 %xmm1, %xmm0");
737 asm volatile("sha1msg2 %xmm7, %xmm2");
738 asm volatile("sha1msg2 (%eax), %xmm0");
739 asm volatile("sha1msg2 (0x12345678), %xmm0");
740 asm volatile("sha1msg2 (%eax), %xmm3");
741 asm volatile("sha1msg2 (%ecx,%eax,1), %xmm0");
742 asm volatile("sha1msg2 0x12345678(,%eax,1), %xmm0");
743 asm volatile("sha1msg2 (%eax,%ecx,1), %xmm0");
744 asm volatile("sha1msg2 (%eax,%ecx,8), %xmm0");
745 asm volatile("sha1msg2 0x12(%eax), %xmm0");
746 asm volatile("sha1msg2 0x12(%ebp), %xmm0");
747 asm volatile("sha1msg2 0x12(%ecx,%eax,1), %xmm0");
748 asm volatile("sha1msg2 0x12(%ebp,%eax,1), %xmm0");
749 asm volatile("sha1msg2 0x12(%eax,%ecx,1), %xmm0");
750 asm volatile("sha1msg2 0x12(%eax,%ecx,8), %xmm0");
751 asm volatile("sha1msg2 0x12345678(%eax), %xmm0");
752 asm volatile("sha1msg2 0x12345678(%ebp), %xmm0");
753 asm volatile("sha1msg2 0x12345678(%ecx,%eax,1), %xmm0");
754 asm volatile("sha1msg2 0x12345678(%ebp,%eax,1), %xmm0");
755 asm volatile("sha1msg2 0x12345678(%eax,%ecx,1), %xmm0");
756 asm volatile("sha1msg2 0x12345678(%eax,%ecx,8), %xmm0");
757
758 /* sha256rnds2 <XMM0>, xmm2/m128, xmm1 */
759 /* Note sha256rnds2 has an implicit operand 'xmm0' */
760
761 asm volatile("sha256rnds2 %xmm4, %xmm1");
762 asm volatile("sha256rnds2 %xmm7, %xmm2");
763 asm volatile("sha256rnds2 (%eax), %xmm1");
764 asm volatile("sha256rnds2 (0x12345678), %xmm1");
765 asm volatile("sha256rnds2 (%eax), %xmm3");
766 asm volatile("sha256rnds2 (%ecx,%eax,1), %xmm1");
767 asm volatile("sha256rnds2 0x12345678(,%eax,1), %xmm1");
768 asm volatile("sha256rnds2 (%eax,%ecx,1), %xmm1");
769 asm volatile("sha256rnds2 (%eax,%ecx,8), %xmm1");
770 asm volatile("sha256rnds2 0x12(%eax), %xmm1");
771 asm volatile("sha256rnds2 0x12(%ebp), %xmm1");
772 asm volatile("sha256rnds2 0x12(%ecx,%eax,1), %xmm1");
773 asm volatile("sha256rnds2 0x12(%ebp,%eax,1), %xmm1");
774 asm volatile("sha256rnds2 0x12(%eax,%ecx,1), %xmm1");
775 asm volatile("sha256rnds2 0x12(%eax,%ecx,8), %xmm1");
776 asm volatile("sha256rnds2 0x12345678(%eax), %xmm1");
777 asm volatile("sha256rnds2 0x12345678(%ebp), %xmm1");
778 asm volatile("sha256rnds2 0x12345678(%ecx,%eax,1), %xmm1");
779 asm volatile("sha256rnds2 0x12345678(%ebp,%eax,1), %xmm1");
780 asm volatile("sha256rnds2 0x12345678(%eax,%ecx,1), %xmm1");
781 asm volatile("sha256rnds2 0x12345678(%eax,%ecx,8), %xmm1");
782
783 /* sha256msg1 xmm2/m128, xmm1 */
784
785 asm volatile("sha256msg1 %xmm1, %xmm0");
786 asm volatile("sha256msg1 %xmm7, %xmm2");
787 asm volatile("sha256msg1 (%eax), %xmm0");
788 asm volatile("sha256msg1 (0x12345678), %xmm0");
789 asm volatile("sha256msg1 (%eax), %xmm3");
790 asm volatile("sha256msg1 (%ecx,%eax,1), %xmm0");
791 asm volatile("sha256msg1 0x12345678(,%eax,1), %xmm0");
792 asm volatile("sha256msg1 (%eax,%ecx,1), %xmm0");
793 asm volatile("sha256msg1 (%eax,%ecx,8), %xmm0");
794 asm volatile("sha256msg1 0x12(%eax), %xmm0");
795 asm volatile("sha256msg1 0x12(%ebp), %xmm0");
796 asm volatile("sha256msg1 0x12(%ecx,%eax,1), %xmm0");
797 asm volatile("sha256msg1 0x12(%ebp,%eax,1), %xmm0");
798 asm volatile("sha256msg1 0x12(%eax,%ecx,1), %xmm0");
799 asm volatile("sha256msg1 0x12(%eax,%ecx,8), %xmm0");
800 asm volatile("sha256msg1 0x12345678(%eax), %xmm0");
801 asm volatile("sha256msg1 0x12345678(%ebp), %xmm0");
802 asm volatile("sha256msg1 0x12345678(%ecx,%eax,1), %xmm0");
803 asm volatile("sha256msg1 0x12345678(%ebp,%eax,1), %xmm0");
804 asm volatile("sha256msg1 0x12345678(%eax,%ecx,1), %xmm0");
805 asm volatile("sha256msg1 0x12345678(%eax,%ecx,8), %xmm0");
806
807 /* sha256msg2 xmm2/m128, xmm1 */
808
809 asm volatile("sha256msg2 %xmm1, %xmm0");
810 asm volatile("sha256msg2 %xmm7, %xmm2");
811 asm volatile("sha256msg2 (%eax), %xmm0");
812 asm volatile("sha256msg2 (0x12345678), %xmm0");
813 asm volatile("sha256msg2 (%eax), %xmm3");
814 asm volatile("sha256msg2 (%ecx,%eax,1), %xmm0");
815 asm volatile("sha256msg2 0x12345678(,%eax,1), %xmm0");
816 asm volatile("sha256msg2 (%eax,%ecx,1), %xmm0");
817 asm volatile("sha256msg2 (%eax,%ecx,8), %xmm0");
818 asm volatile("sha256msg2 0x12(%eax), %xmm0");
819 asm volatile("sha256msg2 0x12(%ebp), %xmm0");
820 asm volatile("sha256msg2 0x12(%ecx,%eax,1), %xmm0");
821 asm volatile("sha256msg2 0x12(%ebp,%eax,1), %xmm0");
822 asm volatile("sha256msg2 0x12(%eax,%ecx,1), %xmm0");
823 asm volatile("sha256msg2 0x12(%eax,%ecx,8), %xmm0");
824 asm volatile("sha256msg2 0x12345678(%eax), %xmm0");
825 asm volatile("sha256msg2 0x12345678(%ebp), %xmm0");
826 asm volatile("sha256msg2 0x12345678(%ecx,%eax,1), %xmm0");
827 asm volatile("sha256msg2 0x12345678(%ebp,%eax,1), %xmm0");
828 asm volatile("sha256msg2 0x12345678(%eax,%ecx,1), %xmm0");
829 asm volatile("sha256msg2 0x12345678(%eax,%ecx,8), %xmm0");
830
831 /* clflushopt m8 */
832
833 asm volatile("clflushopt (%eax)");
834 asm volatile("clflushopt (0x12345678)");
835 asm volatile("clflushopt 0x12345678(%eax,%ecx,8)");
836 /* Also check instructions in the same group encoding as clflushopt */
837 asm volatile("clflush (%eax)");
838 asm volatile("sfence");
839
840 /* clwb m8 */
841
842 asm volatile("clwb (%eax)");
843 asm volatile("clwb (0x12345678)");
844 asm volatile("clwb 0x12345678(%eax,%ecx,8)");
845 /* Also check instructions in the same group encoding as clwb */
846 asm volatile("xsaveopt (%eax)");
847 asm volatile("mfence");
848
849 /* xsavec mem */
850
851 asm volatile("xsavec (%eax)");
852 asm volatile("xsavec (0x12345678)");
853 asm volatile("xsavec 0x12345678(%eax,%ecx,8)");
854
855 /* xsaves mem */
856
857 asm volatile("xsaves (%eax)");
858 asm volatile("xsaves (0x12345678)");
859 asm volatile("xsaves 0x12345678(%eax,%ecx,8)");
860
861 /* xrstors mem */
862
863 asm volatile("xrstors (%eax)");
864 asm volatile("xrstors (0x12345678)");
865 asm volatile("xrstors 0x12345678(%eax,%ecx,8)");
866
867#endif /* #ifndef __x86_64__ */
868
869 /* pcommit */
870
871 asm volatile("pcommit");
872
873 /* Following line is a marker for the awk script - do not change */
874 asm volatile("rdtsc"); /* Stop here */
875
876 return 0;
877}
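The header comment of this file documents the "Expecting: <op> <branch> <rel>" format that gen-insn-x86-dat.awk consumes; those three fields evidently become expected_op_str, expected_branch_str and expected_rel in the generated rows, and insn-x86.c (the next file) turns the strings back into Intel PT enum values. A minimal sketch of that lookup, assuming the static get_op()/get_branch() helpers and struct test_data from insn-x86.c (the function name expecting_example is hypothetical):

/* Sketch only: how the fields from a comment such as
 * "Expecting: call indirect 0" are interpreted when checking a decoded
 * instruction.  Assumes the static helpers defined in insn-x86.c.
 */
static int expecting_example(struct test_data *dat)
{
	int op     = get_op(dat->expected_op_str);		/* e.g. "call" -> INTEL_PT_OP_CALL */
	int branch = get_branch(dat->expected_branch_str);	/* e.g. "indirect" -> INTEL_PT_BR_INDIRECT */

	/* test_data_item() compares these, plus dat->expected_rel, against
	 * what intel_pt_get_insn() reports for dat->data.
	 */
	return (op < 0 || branch < 0) ? -1 : 0;
}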
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
new file mode 100644
index 000000000000..b6115dfd28f0
--- /dev/null
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -0,0 +1,185 @@
1#include <linux/types.h>
2
3#include "debug.h"
4#include "tests/tests.h"
5#include "arch-tests.h"
6
7#include "intel-pt-decoder/insn.h"
8#include "intel-pt-decoder/intel-pt-insn-decoder.h"
9
10struct test_data {
11 u8 data[MAX_INSN_SIZE];
12 int expected_length;
13 int expected_rel;
14 const char *expected_op_str;
15 const char *expected_branch_str;
16 const char *asm_rep;
17};
18
19struct test_data test_data_32[] = {
20#include "insn-x86-dat-32.c"
21 {{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee \trdpkru"},
22 {{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef \twrpkru"},
23 {{0}, 0, 0, NULL, NULL, NULL},
24};
25
26struct test_data test_data_64[] = {
27#include "insn-x86-dat-64.c"
28 {{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee \trdpkru"},
29 {{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef \twrpkru"},
30 {{0}, 0, 0, NULL, NULL, NULL},
31};
32
33static int get_op(const char *op_str)
34{
35 struct val_data {
36 const char *name;
37 int val;
38 } vals[] = {
39 {"other", INTEL_PT_OP_OTHER},
40 {"call", INTEL_PT_OP_CALL},
41 {"ret", INTEL_PT_OP_RET},
42 {"jcc", INTEL_PT_OP_JCC},
43 {"jmp", INTEL_PT_OP_JMP},
44 {"loop", INTEL_PT_OP_LOOP},
45 {"iret", INTEL_PT_OP_IRET},
46 {"int", INTEL_PT_OP_INT},
47 {"syscall", INTEL_PT_OP_SYSCALL},
48 {"sysret", INTEL_PT_OP_SYSRET},
49 {NULL, 0},
50 };
51 struct val_data *val;
52
53 if (!op_str || !strlen(op_str))
54 return 0;
55
56 for (val = vals; val->name; val++) {
57 if (!strcmp(val->name, op_str))
58 return val->val;
59 }
60
61 pr_debug("Failed to get op\n");
62
63 return -1;
64}
65
66static int get_branch(const char *branch_str)
67{
68 struct val_data {
69 const char *name;
70 int val;
71 } vals[] = {
72 {"no_branch", INTEL_PT_BR_NO_BRANCH},
73 {"indirect", INTEL_PT_BR_INDIRECT},
74 {"conditional", INTEL_PT_BR_CONDITIONAL},
75 {"unconditional", INTEL_PT_BR_UNCONDITIONAL},
76 {NULL, 0},
77 };
78 struct val_data *val;
79
80 if (!branch_str || !strlen(branch_str))
81 return 0;
82
83 for (val = vals; val->name; val++) {
84 if (!strcmp(val->name, branch_str))
85 return val->val;
86 }
87
88 pr_debug("Failed to get branch\n");
89
90 return -1;
91}
92
93static int test_data_item(struct test_data *dat, int x86_64)
94{
95 struct intel_pt_insn intel_pt_insn;
96 struct insn insn;
97 int op, branch;
98
99 insn_init(&insn, dat->data, MAX_INSN_SIZE, x86_64);
100 insn_get_length(&insn);
101
102 if (!insn_complete(&insn)) {
103 pr_debug("Failed to decode: %s\n", dat->asm_rep);
104 return -1;
105 }
106
107 if (insn.length != dat->expected_length) {
108 pr_debug("Failed to decode length (%d vs expected %d): %s\n",
109 insn.length, dat->expected_length, dat->asm_rep);
110 return -1;
111 }
112
113 op = get_op(dat->expected_op_str);
114 branch = get_branch(dat->expected_branch_str);
115
116 if (intel_pt_get_insn(dat->data, MAX_INSN_SIZE, x86_64, &intel_pt_insn)) {
117 pr_debug("Intel PT failed to decode: %s\n", dat->asm_rep);
118 return -1;
119 }
120
121 if ((int)intel_pt_insn.op != op) {
122 pr_debug("Failed to decode 'op' value (%d vs expected %d): %s\n",
123 intel_pt_insn.op, op, dat->asm_rep);
124 return -1;
125 }
126
127 if ((int)intel_pt_insn.branch != branch) {
128 pr_debug("Failed to decode 'branch' value (%d vs expected %d): %s\n",
129 intel_pt_insn.branch, branch, dat->asm_rep);
130 return -1;
131 }
132
133 if (intel_pt_insn.rel != dat->expected_rel) {
134 pr_debug("Failed to decode 'rel' value (%#x vs expected %#x): %s\n",
135 intel_pt_insn.rel, dat->expected_rel, dat->asm_rep);
136 return -1;
137 }
138
139 pr_debug("Decoded ok: %s\n", dat->asm_rep);
140
141 return 0;
142}
143
144static int test_data_set(struct test_data *dat_set, int x86_64)
145{
146 struct test_data *dat;
147 int ret = 0;
148
149 for (dat = dat_set; dat->expected_length; dat++) {
150 if (test_data_item(dat, x86_64))
151 ret = -1;
152 }
153
154 return ret;
155}
156
157/**
158 * test__insn_x86 - test x86 instruction decoder - new instructions.
159 *
160 * This function implements a test that decodes a selection of instructions and
161 * checks the results. The Intel PT function that further categorizes
162 * instructions (i.e. intel_pt_get_insn()) is also checked.
163 *
164 * The instructions are originally in insn-x86-dat-src.c which has been
165 * processed by scripts gen-insn-x86-dat.sh and gen-insn-x86-dat.awk to produce
166 * insn-x86-dat-32.c and insn-x86-dat-64.c which are included into this program.
167 * i.e. to add new instructions to the test, edit insn-x86-dat-src.c, run the
168 * gen-insn-x86-dat.sh script, make perf, and then run the test.
169 *
170 * If the test passes, %0 is returned; otherwise %-1 is returned. Use the
171 * verbose (-v) option to see all the instructions and whether or not they
172 * decoded successfully.
173 */
174int test__insn_x86(void)
175{
176 int ret = 0;
177
178 if (test_data_set(test_data_32, 0))
179 ret = -1;
180
181 if (test_data_set(test_data_64, 1))
182 ret = -1;
183
184 return ret;
185}
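
For reference, a hand-written entry in the test_data tables above has this shape. The field names are the ones test_data_item() reads (data, expected_length, expected_rel, expected_op_str, expected_branch_str, asm_rep); the direct-call bytes and the "call"/"unconditional" strings below are only an illustrative assumption based on get_op() and get_branch(), not an entry from the generated tables:

    /* Illustrative only: a direct near call with rel32 == 0, 5 bytes long. */
    struct test_data example_call = {
    	.data                = {0xe8, 0x00, 0x00, 0x00, 0x00},
    	.expected_length     = 5,
    	.expected_rel        = 0,
    	.expected_op_str     = "call",          /* mapped to INTEL_PT_OP_CALL */
    	.expected_branch_str = "unconditional", /* mapped to INTEL_PT_BR_UNCONDITIONAL */
    	.asm_rep             = "e8 00 00 00 00 \tcallq",  /* only used in debug output */
    };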
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
new file mode 100644
index 000000000000..d28c1b6a3b54
--- /dev/null
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -0,0 +1,124 @@
1#include "tests/tests.h"
2#include "perf.h"
3#include "cloexec.h"
4#include "debug.h"
5#include "evlist.h"
6#include "evsel.h"
7#include "arch-tests.h"
8
9#include <sys/mman.h>
10#include <string.h>
11
12static pid_t spawn(void)
13{
14 pid_t pid;
15
16 pid = fork();
17 if (pid)
18 return pid;
19
20 while(1);
21 sleep(5);
22 return 0;
23}
24
25/*
26 * Create an event group that contains both a sampled hardware
27 * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
28 * wait for the hardware perf counter to overflow and generate a PMI,
29 * which triggers an event read for both of the events in the group.
30 *
31 * Since reading Intel CQM event counters requires sending SMP IPIs, the
32 * CQM pmu needs to handle the above situation gracefully, and return
33 * the last read counter value to avoid triggering a WARN_ON_ONCE() in
34 * smp_call_function_many() caused by sending IPIs from NMI context.
35 */
36int test__intel_cqm_count_nmi_context(void)
37{
38 struct perf_evlist *evlist = NULL;
39 struct perf_evsel *evsel = NULL;
40 struct perf_event_attr pe;
41 int i, fd[2], flag, ret;
42 size_t mmap_len;
43 void *event;
44 pid_t pid;
45 int err = TEST_FAIL;
46
47 flag = perf_event_open_cloexec_flag();
48
49 evlist = perf_evlist__new();
50 if (!evlist) {
51 pr_debug("perf_evlist__new failed\n");
52 return TEST_FAIL;
53 }
54
55 ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
56 if (ret) {
57 pr_debug("parse_events failed\n");
58 err = TEST_SKIP;
59 goto out;
60 }
61
62 evsel = perf_evlist__first(evlist);
63 if (!evsel) {
64 pr_debug("perf_evlist__first failed\n");
65 goto out;
66 }
67
68 memset(&pe, 0, sizeof(pe));
69 pe.size = sizeof(pe);
70
71 pe.type = PERF_TYPE_HARDWARE;
72 pe.config = PERF_COUNT_HW_CPU_CYCLES;
73 pe.read_format = PERF_FORMAT_GROUP;
74
75 pe.sample_period = 128;
76 pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
77
78 pid = spawn();
79
80 fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
81 if (fd[0] < 0) {
82 pr_debug("failed to open event\n");
83 goto out;
84 }
85
86 memset(&pe, 0, sizeof(pe));
87 pe.size = sizeof(pe);
88
89 pe.type = evsel->attr.type;
90 pe.config = evsel->attr.config;
91
92 fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
93 if (fd[1] < 0) {
94 pr_debug("failed to open event\n");
95 goto out;
96 }
97
98 /*
99 * Pick a power-of-two number of pages + 1 for the meta-data
100 * page (struct perf_event_mmap_page). See tools/perf/design.txt.
101 */
102 mmap_len = page_size * 65;
103
104 event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
105 if (event == (void *)(-1)) {
106 pr_debug("failed to mmap %d\n", errno);
107 goto out;
108 }
109
110 sleep(1);
111
112 err = TEST_OK;
113
114 munmap(event, mmap_len);
115
116 for (i = 0; i < 2; i++)
117 close(fd[i]);
118
119 kill(pid, SIGKILL);
120 wait(NULL);
121out:
122 perf_evlist__delete(evlist);
123 return err;
124}
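
A minimal sketch of the ring-buffer sizing rule referenced in the comment above: a power-of-two number of data pages plus one meta-data page (struct perf_event_mmap_page). With data_pages = 64 this reproduces the page_size * 65 used here; the helper name is illustrative, not part of the perf API:

    #include <stddef.h>

    /* data_pages must be a power of two; +1 covers the meta-data page. */
    static size_t perf_mmap_len(size_t page_size, size_t data_pages)
    {
    	return (data_pages + 1) * page_size;
    }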
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index 5f49484f1abc..658cd200af74 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -9,7 +9,9 @@
9#include "thread_map.h" 9#include "thread_map.h"
10#include "cpumap.h" 10#include "cpumap.h"
11#include "tsc.h" 11#include "tsc.h"
12#include "tests.h" 12#include "tests/tests.h"
13
14#include "arch-tests.h"
13 15
14#define CHECK__(x) { \ 16#define CHECK__(x) { \
15 while ((x) < 0) { \ 17 while ((x) < 0) { \
diff --git a/tools/perf/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
index d31f2c4d9f64..e7688214c7cf 100644
--- a/tools/perf/tests/rdpmc.c
+++ b/tools/perf/arch/x86/tests/rdpmc.c
@@ -5,10 +5,9 @@
5#include <linux/types.h> 5#include <linux/types.h>
6#include "perf.h" 6#include "perf.h"
7#include "debug.h" 7#include "debug.h"
8#include "tests.h" 8#include "tests/tests.h"
9#include "cloexec.h" 9#include "cloexec.h"
10 10#include "arch-tests.h"
11#if defined(__x86_64__) || defined(__i386__)
12 11
13static u64 rdpmc(unsigned int counter) 12static u64 rdpmc(unsigned int counter)
14{ 13{
@@ -173,5 +172,3 @@ int test__rdpmc(void)
173 172
174 return 0; 173 return 0;
175} 174}
176
177#endif
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index a08de0a35b83..9223c164e545 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -21,55 +21,109 @@
21 */ 21 */
22 22
23#include <stddef.h> 23#include <stddef.h>
24#include <errno.h> /* for EINVAL */
25#include <string.h> /* for strcmp */
26#include <linux/ptrace.h> /* for struct pt_regs */
27#include <linux/kernel.h> /* for offsetof */
24#include <dwarf-regs.h> 28#include <dwarf-regs.h>
25 29
26/* 30/*
27 * Generic dwarf analysis helpers 31 * See arch/x86/kernel/ptrace.c.
32 * Different from it:
33 *
34 * - Since struct pt_regs is defined differently for user and kernel,
35 * but we want to use 'ax, bx' instead of 'rax, rbx' (which are the
36 * struct field names of the user's pt_regs), we make REG_OFFSET_NAME
37 * accept both the string name and the reg field name.
38 *
39 * - Since accessing x86_32's pt_regs from an x86_64 build is difficult,
40 * and vice versa, we simply fill the offset with -1, so
41 * get_arch_regstr() still works but regs_query_register_offset()
42 * returns an error.
43 * The only inconvenience this causes at the moment is that we cannot
44 * generate a BPF prologue for an x86_64 kernel if perf is built for
45 * x86_32. This is really a rare use case.
46 *
47 * - The order differs from the kernel's ptrace.c for get_arch_regstr(); use
48 * the order defined by DWARF.
28 */ 49 */
29 50
30#define X86_32_MAX_REGS 8 51struct pt_regs_offset {
31const char *x86_32_regs_table[X86_32_MAX_REGS] = { 52 const char *name;
32 "%ax", 53 int offset;
33 "%cx", 54};
34 "%dx", 55
35 "%bx", 56#define REG_OFFSET_END {.name = NULL, .offset = 0}
36 "$stack", /* Stack address instead of %sp */ 57
37 "%bp", 58#ifdef __x86_64__
38 "%si", 59# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
39 "%di", 60# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = -1}
61#else
62# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = -1}
63# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
64#endif
65
66static const struct pt_regs_offset x86_32_regoffset_table[] = {
67 REG_OFFSET_NAME_32("%ax", eax),
68 REG_OFFSET_NAME_32("%cx", ecx),
69 REG_OFFSET_NAME_32("%dx", edx),
70 REG_OFFSET_NAME_32("%bx", ebx),
71 REG_OFFSET_NAME_32("$stack", esp), /* Stack address instead of %sp */
72 REG_OFFSET_NAME_32("%bp", ebp),
73 REG_OFFSET_NAME_32("%si", esi),
74 REG_OFFSET_NAME_32("%di", edi),
75 REG_OFFSET_END,
40}; 76};
41 77
42#define X86_64_MAX_REGS 16 78static const struct pt_regs_offset x86_64_regoffset_table[] = {
43const char *x86_64_regs_table[X86_64_MAX_REGS] = { 79 REG_OFFSET_NAME_64("%ax", rax),
44 "%ax", 80 REG_OFFSET_NAME_64("%dx", rdx),
45 "%dx", 81 REG_OFFSET_NAME_64("%cx", rcx),
46 "%cx", 82 REG_OFFSET_NAME_64("%bx", rbx),
47 "%bx", 83 REG_OFFSET_NAME_64("%si", rsi),
48 "%si", 84 REG_OFFSET_NAME_64("%di", rdi),
49 "%di", 85 REG_OFFSET_NAME_64("%bp", rbp),
50 "%bp", 86 REG_OFFSET_NAME_64("%sp", rsp),
51 "%sp", 87 REG_OFFSET_NAME_64("%r8", r8),
52 "%r8", 88 REG_OFFSET_NAME_64("%r9", r9),
53 "%r9", 89 REG_OFFSET_NAME_64("%r10", r10),
54 "%r10", 90 REG_OFFSET_NAME_64("%r11", r11),
55 "%r11", 91 REG_OFFSET_NAME_64("%r12", r12),
56 "%r12", 92 REG_OFFSET_NAME_64("%r13", r13),
57 "%r13", 93 REG_OFFSET_NAME_64("%r14", r14),
58 "%r14", 94 REG_OFFSET_NAME_64("%r15", r15),
59 "%r15", 95 REG_OFFSET_END,
60}; 96};
61 97
62/* TODO: switching by dwarf address size */ 98/* TODO: switching by dwarf address size */
63#ifdef __x86_64__ 99#ifdef __x86_64__
64#define ARCH_MAX_REGS X86_64_MAX_REGS 100#define regoffset_table x86_64_regoffset_table
65#define arch_regs_table x86_64_regs_table
66#else 101#else
67#define ARCH_MAX_REGS X86_32_MAX_REGS 102#define regoffset_table x86_32_regoffset_table
68#define arch_regs_table x86_32_regs_table
69#endif 103#endif
70 104
105/* Minus 1 for the ending REG_OFFSET_END */
106#define ARCH_MAX_REGS ((sizeof(regoffset_table) / sizeof(regoffset_table[0])) - 1)
107
71/* Return architecture dependent register string (for kprobe-tracer) */ 108/* Return architecture dependent register string (for kprobe-tracer) */
72const char *get_arch_regstr(unsigned int n) 109const char *get_arch_regstr(unsigned int n)
73{ 110{
74 return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL; 111 return (n < ARCH_MAX_REGS) ? regoffset_table[n].name : NULL;
112}
113
114/* Reuse code from arch/x86/kernel/ptrace.c */
115/**
116 * regs_query_register_offset() - query register offset from its name
117 * @name: the name of a register
118 *
119 * regs_query_register_offset() returns the offset of a register in struct
120 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
121 */
122int regs_query_register_offset(const char *name)
123{
124 const struct pt_regs_offset *roff;
125 for (roff = regoffset_table; roff->name != NULL; roff++)
126 if (!strcmp(roff->name, name))
127 return roff->offset;
128 return -EINVAL;
75} 129}
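
A hedged usage sketch of the two lookups this file now provides (the real callers are the probe and BPF-prologue code; the standalone program below is illustrative only and would be linked against the object above):

    #include <stdio.h>

    /* declarations matching tools/perf/arch/x86/util/dwarf-regs.c above */
    const char *get_arch_regstr(unsigned int n);
    int regs_query_register_offset(const char *name);

    int main(void)
    {
    	/* DWARF register 0 maps to "%ax" in both tables above. */
    	printf("dwarf reg 0 -> %s\n", get_arch_regstr(0));

    	/*
    	 * Offset of the register inside struct pt_regs, or -EINVAL for an
    	 * unknown name; -1 when perf was built for the other x86 flavour.
    	 */
    	printf("offset of %%ax -> %d\n", regs_query_register_offset("%ax"));
    	return 0;
    }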
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 2ca10d796c0b..b02af064f0f9 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -624,13 +624,49 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
624 * threads. 624 * threads.
625 */ 625 */
626 if (have_timing_info && !cpu_map__empty(cpus)) { 626 if (have_timing_info && !cpu_map__empty(cpus)) {
627 err = intel_pt_track_switches(evlist); 627 if (perf_can_record_switch_events()) {
628 if (err == -EPERM) 628 bool cpu_wide = !target__none(&opts->target) &&
629 pr_debug2("Unable to select sched:sched_switch\n"); 629 !target__has_task(&opts->target);
630 else if (err) 630
631 return err; 631 if (!cpu_wide && perf_can_record_cpu_wide()) {
632 else 632 struct perf_evsel *switch_evsel;
633 ptr->have_sched_switch = 1; 633
634 err = parse_events(evlist, "dummy:u", NULL);
635 if (err)
636 return err;
637
638 switch_evsel = perf_evlist__last(evlist);
639
640 switch_evsel->attr.freq = 0;
641 switch_evsel->attr.sample_period = 1;
642 switch_evsel->attr.context_switch = 1;
643
644 switch_evsel->system_wide = true;
645 switch_evsel->no_aux_samples = true;
646 switch_evsel->immediate = true;
647
648 perf_evsel__set_sample_bit(switch_evsel, TID);
649 perf_evsel__set_sample_bit(switch_evsel, TIME);
650 perf_evsel__set_sample_bit(switch_evsel, CPU);
651
652 opts->record_switch_events = false;
653 ptr->have_sched_switch = 3;
654 } else {
655 opts->record_switch_events = true;
656 if (cpu_wide)
657 ptr->have_sched_switch = 3;
658 else
659 ptr->have_sched_switch = 2;
660 }
661 } else {
662 err = intel_pt_track_switches(evlist);
663 if (err == -EPERM)
664 pr_debug2("Unable to select sched:sched_switch\n");
665 else if (err)
666 return err;
667 else
668 ptr->have_sched_switch = 1;
669 }
634 } 670 }
635 671
636 if (intel_pt_evsel) { 672 if (intel_pt_evsel) {
@@ -663,8 +699,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
663 tracking_evsel->attr.sample_period = 1; 699 tracking_evsel->attr.sample_period = 1;
664 700
665 /* In per-cpu case, always need the time of mmap events etc */ 701 /* In per-cpu case, always need the time of mmap events etc */
666 if (!cpu_map__empty(cpus)) 702 if (!cpu_map__empty(cpus)) {
667 perf_evsel__set_sample_bit(tracking_evsel, TIME); 703 perf_evsel__set_sample_bit(tracking_evsel, TIME);
704 /* And the CPU for switch events */
705 perf_evsel__set_sample_bit(tracking_evsel, CPU);
706 }
668 } 707 }
669 708
670 /* 709 /*
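
To keep the branches above readable, here is a hedged summary (an assumption based only on this hunk) of what the three have_sched_switch values selected by it stand for:

    /*
     *   have_sched_switch == 1: fall back to the sched:sched_switch tracepoint
     *   have_sched_switch == 2: per-task context-switch events
     *                           (opts->record_switch_events)
     *   have_sched_switch == 3: CPU-wide switch information, either via switch
     *                           events in a CPU-wide session or via the
     *                           system-wide "dummy:u" event with
     *                           attr.context_switch set
     */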
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index 573e28896038..60bf11943047 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -1,6 +1,6 @@
1perf-y += sched-messaging.o 1perf-y += sched-messaging.o
2perf-y += sched-pipe.o 2perf-y += sched-pipe.o
3perf-y += mem-memcpy.o 3perf-y += mem-functions.o
4perf-y += futex-hash.o 4perf-y += futex-hash.o
5perf-y += futex-wake.o 5perf-y += futex-wake.o
6perf-y += futex-wake-parallel.o 6perf-y += futex-wake-parallel.o
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
new file mode 100644
index 000000000000..9419b944220f
--- /dev/null
+++ b/tools/perf/bench/mem-functions.c
@@ -0,0 +1,379 @@
1/*
2 * mem-functions.c
3 *
4 * Simple memcpy() and memset() benchmarks
5 *
6 * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
7 */
8
9#include "../perf.h"
10#include "../util/util.h"
11#include "../util/parse-options.h"
12#include "../util/header.h"
13#include "../util/cloexec.h"
14#include "bench.h"
15#include "mem-memcpy-arch.h"
16#include "mem-memset-arch.h"
17
18#include <stdio.h>
19#include <stdlib.h>
20#include <string.h>
21#include <sys/time.h>
22#include <errno.h>
23
24#define K 1024
25
26static const char *size_str = "1MB";
27static const char *function_str = "all";
28static int nr_loops = 1;
29static bool use_cycles;
30static int cycles_fd;
31
32static const struct option options[] = {
33 OPT_STRING('s', "size", &size_str, "1MB",
34 "Specify the size of the memory buffers. "
35 "Available units: B, KB, MB, GB and TB (case insensitive)"),
36
37 OPT_STRING('f', "function", &function_str, "all",
38 "Specify the function to run, \"all\" runs all available functions, \"help\" lists them"),
39
40 OPT_INTEGER('l', "nr_loops", &nr_loops,
41 "Specify the number of loops to run. (default: 1)"),
42
43 OPT_BOOLEAN('c', "cycles", &use_cycles,
44 "Use a cycles event instead of gettimeofday() to measure performance"),
45
46 OPT_END()
47};
48
49typedef void *(*memcpy_t)(void *, const void *, size_t);
50typedef void *(*memset_t)(void *, int, size_t);
51
52struct function {
53 const char *name;
54 const char *desc;
55 union {
56 memcpy_t memcpy;
57 memset_t memset;
58 } fn;
59};
60
61static struct perf_event_attr cycle_attr = {
62 .type = PERF_TYPE_HARDWARE,
63 .config = PERF_COUNT_HW_CPU_CYCLES
64};
65
66static void init_cycles(void)
67{
68 cycles_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, perf_event_open_cloexec_flag());
69
70 if (cycles_fd < 0 && errno == ENOSYS)
71 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
72 else
73 BUG_ON(cycles_fd < 0);
74}
75
76static u64 get_cycles(void)
77{
78 int ret;
79 u64 clk;
80
81 ret = read(cycles_fd, &clk, sizeof(u64));
82 BUG_ON(ret != sizeof(u64));
83
84 return clk;
85}
86
87static double timeval2double(struct timeval *ts)
88{
89 return (double)ts->tv_sec + (double)ts->tv_usec / (double)1000000;
90}
91
92#define print_bps(x) do { \
93 if (x < K) \
94 printf(" %14lf bytes/sec\n", x); \
95 else if (x < K * K) \
96 printf(" %14lf KB/sec\n", x / K); \
97 else if (x < K * K * K) \
98 printf(" %14lf MB/sec\n", x / K / K); \
99 else \
100 printf(" %14lf GB/sec\n", x / K / K / K); \
101 } while (0)
102
103struct bench_mem_info {
104 const struct function *functions;
105 u64 (*do_cycles)(const struct function *r, size_t size);
106 double (*do_gettimeofday)(const struct function *r, size_t size);
107 const char *const *usage;
108};
109
110static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t size, double size_total)
111{
112 const struct function *r = &info->functions[r_idx];
113 double result_bps = 0.0;
114 u64 result_cycles = 0;
115
116 printf("# function '%s' (%s)\n", r->name, r->desc);
117
118 if (bench_format == BENCH_FORMAT_DEFAULT)
119 printf("# Copying %s bytes ...\n\n", size_str);
120
121 if (use_cycles) {
122 result_cycles = info->do_cycles(r, size);
123 } else {
124 result_bps = info->do_gettimeofday(r, size);
125 }
126
127 switch (bench_format) {
128 case BENCH_FORMAT_DEFAULT:
129 if (use_cycles) {
130 printf(" %14lf cycles/byte\n", (double)result_cycles/size_total);
131 } else {
132 print_bps(result_bps);
133 }
134 break;
135
136 case BENCH_FORMAT_SIMPLE:
137 if (use_cycles) {
138 printf("%lf\n", (double)result_cycles/size_total);
139 } else {
140 printf("%lf\n", result_bps);
141 }
142 break;
143
144 default:
145 BUG_ON(1);
146 break;
147 }
148}
149
150static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *info)
151{
152 int i;
153 size_t size;
154 double size_total;
155
156 argc = parse_options(argc, argv, options, info->usage, 0);
157
158 if (use_cycles)
159 init_cycles();
160
161 size = (size_t)perf_atoll((char *)size_str);
162 size_total = (double)size * nr_loops;
163
164 if ((s64)size <= 0) {
165 fprintf(stderr, "Invalid size:%s\n", size_str);
166 return 1;
167 }
168
169 if (!strncmp(function_str, "all", 3)) {
170 for (i = 0; info->functions[i].name; i++)
171 __bench_mem_function(info, i, size, size_total);
172 return 0;
173 }
174
175 for (i = 0; info->functions[i].name; i++) {
176 if (!strcmp(info->functions[i].name, function_str))
177 break;
178 }
179 if (!info->functions[i].name) {
180 if (strcmp(function_str, "help") && strcmp(function_str, "h"))
181 printf("Unknown function: %s\n", function_str);
182 printf("Available functions:\n");
183 for (i = 0; info->functions[i].name; i++) {
184 printf("\t%s ... %s\n",
185 info->functions[i].name, info->functions[i].desc);
186 }
187 return 1;
188 }
189
190 __bench_mem_function(info, i, size, size_total);
191
192 return 0;
193}
194
195static void memcpy_alloc_mem(void **dst, void **src, size_t size)
196{
197 *dst = zalloc(size);
198 if (!*dst)
199 die("memory allocation failed - maybe size is too large?\n");
200
201 *src = zalloc(size);
202 if (!*src)
203 die("memory allocation failed - maybe size is too large?\n");
204
205 /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
206 memset(*src, 0, size);
207}
208
209static u64 do_memcpy_cycles(const struct function *r, size_t size)
210{
211 u64 cycle_start = 0ULL, cycle_end = 0ULL;
212 void *src = NULL, *dst = NULL;
213 memcpy_t fn = r->fn.memcpy;
214 int i;
215
216 memcpy_alloc_mem(&dst, &src, size);
217
218 /*
219 * We prefault the freshly allocated memory range here,
220 * to not measure page fault overhead:
221 */
222 fn(dst, src, size);
223
224 cycle_start = get_cycles();
225 for (i = 0; i < nr_loops; ++i)
226 fn(dst, src, size);
227 cycle_end = get_cycles();
228
229 free(src);
230 free(dst);
231 return cycle_end - cycle_start;
232}
233
234static double do_memcpy_gettimeofday(const struct function *r, size_t size)
235{
236 struct timeval tv_start, tv_end, tv_diff;
237 memcpy_t fn = r->fn.memcpy;
238 void *src = NULL, *dst = NULL;
239 int i;
240
241 memcpy_alloc_mem(&dst, &src, size);
242
243 /*
244 * We prefault the freshly allocated memory range here,
245 * to not measure page fault overhead:
246 */
247 fn(dst, src, size);
248
249 BUG_ON(gettimeofday(&tv_start, NULL));
250 for (i = 0; i < nr_loops; ++i)
251 fn(dst, src, size);
252 BUG_ON(gettimeofday(&tv_end, NULL));
253
254 timersub(&tv_end, &tv_start, &tv_diff);
255
256 free(src);
257 free(dst);
258
259 return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
260}
261
262struct function memcpy_functions[] = {
263 { .name = "default",
264 .desc = "Default memcpy() provided by glibc",
265 .fn.memcpy = memcpy },
266
267#ifdef HAVE_ARCH_X86_64_SUPPORT
268# define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
269# include "mem-memcpy-x86-64-asm-def.h"
270# undef MEMCPY_FN
271#endif
272
273 { .name = NULL, }
274};
275
276static const char * const bench_mem_memcpy_usage[] = {
277 "perf bench mem memcpy <options>",
278 NULL
279};
280
281int bench_mem_memcpy(int argc, const char **argv, const char *prefix __maybe_unused)
282{
283 struct bench_mem_info info = {
284 .functions = memcpy_functions,
285 .do_cycles = do_memcpy_cycles,
286 .do_gettimeofday = do_memcpy_gettimeofday,
287 .usage = bench_mem_memcpy_usage,
288 };
289
290 return bench_mem_common(argc, argv, &info);
291}
292
293static void memset_alloc_mem(void **dst, size_t size)
294{
295 *dst = zalloc(size);
296 if (!*dst)
297 die("memory allocation failed - maybe size is too large?\n");
298}
299
300static u64 do_memset_cycles(const struct function *r, size_t size)
301{
302 u64 cycle_start = 0ULL, cycle_end = 0ULL;
303 memset_t fn = r->fn.memset;
304 void *dst = NULL;
305 int i;
306
307 memset_alloc_mem(&dst, size);
308
309 /*
310 * We prefault the freshly allocated memory range here,
311 * to not measure page fault overhead:
312 */
313 fn(dst, -1, size);
314
315 cycle_start = get_cycles();
316 for (i = 0; i < nr_loops; ++i)
317 fn(dst, i, size);
318 cycle_end = get_cycles();
319
320 free(dst);
321 return cycle_end - cycle_start;
322}
323
324static double do_memset_gettimeofday(const struct function *r, size_t size)
325{
326 struct timeval tv_start, tv_end, tv_diff;
327 memset_t fn = r->fn.memset;
328 void *dst = NULL;
329 int i;
330
331 memset_alloc_mem(&dst, size);
332
333 /*
334 * We prefault the freshly allocated memory range here,
335 * to not measure page fault overhead:
336 */
337 fn(dst, -1, size);
338
339 BUG_ON(gettimeofday(&tv_start, NULL));
340 for (i = 0; i < nr_loops; ++i)
341 fn(dst, i, size);
342 BUG_ON(gettimeofday(&tv_end, NULL));
343
344 timersub(&tv_end, &tv_start, &tv_diff);
345
346 free(dst);
347 return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
348}
349
350static const char * const bench_mem_memset_usage[] = {
351 "perf bench mem memset <options>",
352 NULL
353};
354
355static const struct function memset_functions[] = {
356 { .name = "default",
357 .desc = "Default memset() provided by glibc",
358 .fn.memset = memset },
359
360#ifdef HAVE_ARCH_X86_64_SUPPORT
361# define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
362# include "mem-memset-x86-64-asm-def.h"
363# undef MEMSET_FN
364#endif
365
366 { .name = NULL, }
367};
368
369int bench_mem_memset(int argc, const char **argv, const char *prefix __maybe_unused)
370{
371 struct bench_mem_info info = {
372 .functions = memset_functions,
373 .do_cycles = do_memset_cycles,
374 .do_gettimeofday = do_memset_gettimeofday,
375 .usage = bench_mem_memset_usage,
376 };
377
378 return bench_mem_common(argc, argv, &info);
379}
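
For context, a minimal sketch of how the MEMCPY_FN x-macro above pulls extra routines into the function table; the real per-arch variants come from mem-memcpy-x86-64-asm-def.h, and my_memcpy() and the "mine" entry below are purely hypothetical. The struct function and memcpy_t types are the ones defined earlier in this file:

    /* Hypothetical routine with the memcpy_t signature used above. */
    void *my_memcpy(void *dst, const void *src, size_t n);

    #define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},

    static const struct function extra_memcpy_functions[] = {
    	MEMCPY_FN(my_memcpy, "mine", "Hand-written memcpy() variant")
    	{ .name = NULL, },
    };

    #undef MEMCPY_FN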
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
deleted file mode 100644
index d3dfb7936dcd..000000000000
--- a/tools/perf/bench/mem-memcpy.c
+++ /dev/null
@@ -1,434 +0,0 @@
1/*
2 * mem-memcpy.c
3 *
4 * memcpy: Simple memory copy in various ways
5 *
6 * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
7 */
8
9#include "../perf.h"
10#include "../util/util.h"
11#include "../util/parse-options.h"
12#include "../util/header.h"
13#include "../util/cloexec.h"
14#include "bench.h"
15#include "mem-memcpy-arch.h"
16#include "mem-memset-arch.h"
17
18#include <stdio.h>
19#include <stdlib.h>
20#include <string.h>
21#include <sys/time.h>
22#include <errno.h>
23
24#define K 1024
25
26static const char *length_str = "1MB";
27static const char *routine = "default";
28static int iterations = 1;
29static bool use_cycle;
30static int cycle_fd;
31static bool only_prefault;
32static bool no_prefault;
33
34static const struct option options[] = {
35 OPT_STRING('l', "length", &length_str, "1MB",
36 "Specify length of memory to copy. "
37 "Available units: B, KB, MB, GB and TB (upper and lower)"),
38 OPT_STRING('r', "routine", &routine, "default",
39 "Specify routine to copy, \"all\" runs all available routines"),
40 OPT_INTEGER('i', "iterations", &iterations,
41 "repeat memcpy() invocation this number of times"),
42 OPT_BOOLEAN('c', "cycle", &use_cycle,
43 "Use cycles event instead of gettimeofday() for measuring"),
44 OPT_BOOLEAN('o', "only-prefault", &only_prefault,
45 "Show only the result with page faults before memcpy()"),
46 OPT_BOOLEAN('n', "no-prefault", &no_prefault,
47 "Show only the result without page faults before memcpy()"),
48 OPT_END()
49};
50
51typedef void *(*memcpy_t)(void *, const void *, size_t);
52typedef void *(*memset_t)(void *, int, size_t);
53
54struct routine {
55 const char *name;
56 const char *desc;
57 union {
58 memcpy_t memcpy;
59 memset_t memset;
60 } fn;
61};
62
63struct routine memcpy_routines[] = {
64 { .name = "default",
65 .desc = "Default memcpy() provided by glibc",
66 .fn.memcpy = memcpy },
67#ifdef HAVE_ARCH_X86_64_SUPPORT
68
69#define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
70#include "mem-memcpy-x86-64-asm-def.h"
71#undef MEMCPY_FN
72
73#endif
74
75 { NULL,
76 NULL,
77 {NULL} }
78};
79
80static const char * const bench_mem_memcpy_usage[] = {
81 "perf bench mem memcpy <options>",
82 NULL
83};
84
85static struct perf_event_attr cycle_attr = {
86 .type = PERF_TYPE_HARDWARE,
87 .config = PERF_COUNT_HW_CPU_CYCLES
88};
89
90static void init_cycle(void)
91{
92 cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
93 perf_event_open_cloexec_flag());
94
95 if (cycle_fd < 0 && errno == ENOSYS)
96 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
97 else
98 BUG_ON(cycle_fd < 0);
99}
100
101static u64 get_cycle(void)
102{
103 int ret;
104 u64 clk;
105
106 ret = read(cycle_fd, &clk, sizeof(u64));
107 BUG_ON(ret != sizeof(u64));
108
109 return clk;
110}
111
112static double timeval2double(struct timeval *ts)
113{
114 return (double)ts->tv_sec +
115 (double)ts->tv_usec / (double)1000000;
116}
117
118#define pf (no_prefault ? 0 : 1)
119
120#define print_bps(x) do { \
121 if (x < K) \
122 printf(" %14lf B/Sec", x); \
123 else if (x < K * K) \
124 printf(" %14lfd KB/Sec", x / K); \
125 else if (x < K * K * K) \
126 printf(" %14lf MB/Sec", x / K / K); \
127 else \
128 printf(" %14lf GB/Sec", x / K / K / K); \
129 } while (0)
130
131struct bench_mem_info {
132 const struct routine *routines;
133 u64 (*do_cycle)(const struct routine *r, size_t len, bool prefault);
134 double (*do_gettimeofday)(const struct routine *r, size_t len, bool prefault);
135 const char *const *usage;
136};
137
138static void __bench_mem_routine(struct bench_mem_info *info, int r_idx, size_t len, double totallen)
139{
140 const struct routine *r = &info->routines[r_idx];
141 double result_bps[2];
142 u64 result_cycle[2];
143
144 result_cycle[0] = result_cycle[1] = 0ULL;
145 result_bps[0] = result_bps[1] = 0.0;
146
147 printf("Routine %s (%s)\n", r->name, r->desc);
148
149 if (bench_format == BENCH_FORMAT_DEFAULT)
150 printf("# Copying %s Bytes ...\n\n", length_str);
151
152 if (!only_prefault && !no_prefault) {
153 /* show both of results */
154 if (use_cycle) {
155 result_cycle[0] = info->do_cycle(r, len, false);
156 result_cycle[1] = info->do_cycle(r, len, true);
157 } else {
158 result_bps[0] = info->do_gettimeofday(r, len, false);
159 result_bps[1] = info->do_gettimeofday(r, len, true);
160 }
161 } else {
162 if (use_cycle)
163 result_cycle[pf] = info->do_cycle(r, len, only_prefault);
164 else
165 result_bps[pf] = info->do_gettimeofday(r, len, only_prefault);
166 }
167
168 switch (bench_format) {
169 case BENCH_FORMAT_DEFAULT:
170 if (!only_prefault && !no_prefault) {
171 if (use_cycle) {
172 printf(" %14lf Cycle/Byte\n",
173 (double)result_cycle[0]
174 / totallen);
175 printf(" %14lf Cycle/Byte (with prefault)\n",
176 (double)result_cycle[1]
177 / totallen);
178 } else {
179 print_bps(result_bps[0]);
180 printf("\n");
181 print_bps(result_bps[1]);
182 printf(" (with prefault)\n");
183 }
184 } else {
185 if (use_cycle) {
186 printf(" %14lf Cycle/Byte",
187 (double)result_cycle[pf]
188 / totallen);
189 } else
190 print_bps(result_bps[pf]);
191
192 printf("%s\n", only_prefault ? " (with prefault)" : "");
193 }
194 break;
195 case BENCH_FORMAT_SIMPLE:
196 if (!only_prefault && !no_prefault) {
197 if (use_cycle) {
198 printf("%lf %lf\n",
199 (double)result_cycle[0] / totallen,
200 (double)result_cycle[1] / totallen);
201 } else {
202 printf("%lf %lf\n",
203 result_bps[0], result_bps[1]);
204 }
205 } else {
206 if (use_cycle) {
207 printf("%lf\n", (double)result_cycle[pf]
208 / totallen);
209 } else
210 printf("%lf\n", result_bps[pf]);
211 }
212 break;
213 default:
214 /* reaching this means there's some disaster: */
215 die("unknown format: %d\n", bench_format);
216 break;
217 }
218}
219
220static int bench_mem_common(int argc, const char **argv,
221 const char *prefix __maybe_unused,
222 struct bench_mem_info *info)
223{
224 int i;
225 size_t len;
226 double totallen;
227
228 argc = parse_options(argc, argv, options,
229 info->usage, 0);
230
231 if (no_prefault && only_prefault) {
232 fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
233 return 1;
234 }
235
236 if (use_cycle)
237 init_cycle();
238
239 len = (size_t)perf_atoll((char *)length_str);
240 totallen = (double)len * iterations;
241
242 if ((s64)len <= 0) {
243 fprintf(stderr, "Invalid length:%s\n", length_str);
244 return 1;
245 }
246
247 /* same to without specifying either of prefault and no-prefault */
248 if (only_prefault && no_prefault)
249 only_prefault = no_prefault = false;
250
251 if (!strncmp(routine, "all", 3)) {
252 for (i = 0; info->routines[i].name; i++)
253 __bench_mem_routine(info, i, len, totallen);
254 return 0;
255 }
256
257 for (i = 0; info->routines[i].name; i++) {
258 if (!strcmp(info->routines[i].name, routine))
259 break;
260 }
261 if (!info->routines[i].name) {
262 printf("Unknown routine:%s\n", routine);
263 printf("Available routines...\n");
264 for (i = 0; info->routines[i].name; i++) {
265 printf("\t%s ... %s\n",
266 info->routines[i].name, info->routines[i].desc);
267 }
268 return 1;
269 }
270
271 __bench_mem_routine(info, i, len, totallen);
272
273 return 0;
274}
275
276static void memcpy_alloc_mem(void **dst, void **src, size_t length)
277{
278 *dst = zalloc(length);
279 if (!*dst)
280 die("memory allocation failed - maybe length is too large?\n");
281
282 *src = zalloc(length);
283 if (!*src)
284 die("memory allocation failed - maybe length is too large?\n");
285 /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */
286 memset(*src, 0, length);
287}
288
289static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
290{
291 u64 cycle_start = 0ULL, cycle_end = 0ULL;
292 void *src = NULL, *dst = NULL;
293 memcpy_t fn = r->fn.memcpy;
294 int i;
295
296 memcpy_alloc_mem(&dst, &src, len);
297
298 if (prefault)
299 fn(dst, src, len);
300
301 cycle_start = get_cycle();
302 for (i = 0; i < iterations; ++i)
303 fn(dst, src, len);
304 cycle_end = get_cycle();
305
306 free(src);
307 free(dst);
308 return cycle_end - cycle_start;
309}
310
311static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
312 bool prefault)
313{
314 struct timeval tv_start, tv_end, tv_diff;
315 memcpy_t fn = r->fn.memcpy;
316 void *src = NULL, *dst = NULL;
317 int i;
318
319 memcpy_alloc_mem(&dst, &src, len);
320
321 if (prefault)
322 fn(dst, src, len);
323
324 BUG_ON(gettimeofday(&tv_start, NULL));
325 for (i = 0; i < iterations; ++i)
326 fn(dst, src, len);
327 BUG_ON(gettimeofday(&tv_end, NULL));
328
329 timersub(&tv_end, &tv_start, &tv_diff);
330
331 free(src);
332 free(dst);
333 return (double)(((double)len * iterations) / timeval2double(&tv_diff));
334}
335
336int bench_mem_memcpy(int argc, const char **argv,
337 const char *prefix __maybe_unused)
338{
339 struct bench_mem_info info = {
340 .routines = memcpy_routines,
341 .do_cycle = do_memcpy_cycle,
342 .do_gettimeofday = do_memcpy_gettimeofday,
343 .usage = bench_mem_memcpy_usage,
344 };
345
346 return bench_mem_common(argc, argv, prefix, &info);
347}
348
349static void memset_alloc_mem(void **dst, size_t length)
350{
351 *dst = zalloc(length);
352 if (!*dst)
353 die("memory allocation failed - maybe length is too large?\n");
354}
355
356static u64 do_memset_cycle(const struct routine *r, size_t len, bool prefault)
357{
358 u64 cycle_start = 0ULL, cycle_end = 0ULL;
359 memset_t fn = r->fn.memset;
360 void *dst = NULL;
361 int i;
362
363 memset_alloc_mem(&dst, len);
364
365 if (prefault)
366 fn(dst, -1, len);
367
368 cycle_start = get_cycle();
369 for (i = 0; i < iterations; ++i)
370 fn(dst, i, len);
371 cycle_end = get_cycle();
372
373 free(dst);
374 return cycle_end - cycle_start;
375}
376
377static double do_memset_gettimeofday(const struct routine *r, size_t len,
378 bool prefault)
379{
380 struct timeval tv_start, tv_end, tv_diff;
381 memset_t fn = r->fn.memset;
382 void *dst = NULL;
383 int i;
384
385 memset_alloc_mem(&dst, len);
386
387 if (prefault)
388 fn(dst, -1, len);
389
390 BUG_ON(gettimeofday(&tv_start, NULL));
391 for (i = 0; i < iterations; ++i)
392 fn(dst, i, len);
393 BUG_ON(gettimeofday(&tv_end, NULL));
394
395 timersub(&tv_end, &tv_start, &tv_diff);
396
397 free(dst);
398 return (double)(((double)len * iterations) / timeval2double(&tv_diff));
399}
400
401static const char * const bench_mem_memset_usage[] = {
402 "perf bench mem memset <options>",
403 NULL
404};
405
406static const struct routine memset_routines[] = {
407 { .name ="default",
408 .desc = "Default memset() provided by glibc",
409 .fn.memset = memset },
410#ifdef HAVE_ARCH_X86_64_SUPPORT
411
412#define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
413#include "mem-memset-x86-64-asm-def.h"
414#undef MEMSET_FN
415
416#endif
417
418 { .name = NULL,
419 .desc = NULL,
420 .fn.memset = NULL }
421};
422
423int bench_mem_memset(int argc, const char **argv,
424 const char *prefix __maybe_unused)
425{
426 struct bench_mem_info info = {
427 .routines = memset_routines,
428 .do_cycle = do_memset_cycle,
429 .do_gettimeofday = do_memset_gettimeofday,
430 .usage = bench_mem_memset_usage,
431 };
432
433 return bench_mem_common(argc, argv, prefix, &info);
434}
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 870b7e665a20..492df2752a2d 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -164,8 +164,8 @@ static const struct option options[] = {
164 OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"), 164 OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
165 OPT_STRING('T', "mb_thread" , &p0.mb_thread_str, "MB", "thread memory (MBs)"), 165 OPT_STRING('T', "mb_thread" , &p0.mb_thread_str, "MB", "thread memory (MBs)"),
166 166
167 OPT_UINTEGER('l', "nr_loops" , &p0.nr_loops, "max number of loops to run"), 167 OPT_UINTEGER('l', "nr_loops" , &p0.nr_loops, "max number of loops to run (default: unlimited)"),
168 OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run"), 168 OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run (default: 5 secs)"),
169 OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"), 169 OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"),
170 170
171 OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via writes (can be mixed with -W)"), 171 OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via writes (can be mixed with -W)"),
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index d7f281c2828d..d4ff1b539cfd 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -33,7 +33,7 @@
33#define DATASIZE 100 33#define DATASIZE 100
34 34
35static bool use_pipes = false; 35static bool use_pipes = false;
36static unsigned int loops = 100; 36static unsigned int nr_loops = 100;
37static bool thread_mode = false; 37static bool thread_mode = false;
38static unsigned int num_groups = 10; 38static unsigned int num_groups = 10;
39 39
@@ -79,7 +79,7 @@ static void ready(int ready_out, int wakefd)
79 err(EXIT_FAILURE, "poll"); 79 err(EXIT_FAILURE, "poll");
80} 80}
81 81
82/* Sender sprays loops messages down each file descriptor */ 82/* Sender sprays nr_loops messages down each file descriptor */
83static void *sender(struct sender_context *ctx) 83static void *sender(struct sender_context *ctx)
84{ 84{
85 char data[DATASIZE]; 85 char data[DATASIZE];
@@ -88,7 +88,7 @@ static void *sender(struct sender_context *ctx)
88 ready(ctx->ready_out, ctx->wakefd); 88 ready(ctx->ready_out, ctx->wakefd);
89 89
90 /* Now pump to every receiver. */ 90 /* Now pump to every receiver. */
91 for (i = 0; i < loops; i++) { 91 for (i = 0; i < nr_loops; i++) {
92 for (j = 0; j < ctx->num_fds; j++) { 92 for (j = 0; j < ctx->num_fds; j++) {
93 int ret, done = 0; 93 int ret, done = 0;
94 94
@@ -213,7 +213,7 @@ static unsigned int group(pthread_t *pth,
213 /* Create the pipe between client and server */ 213 /* Create the pipe between client and server */
214 fdpair(fds); 214 fdpair(fds);
215 215
216 ctx->num_packets = num_fds * loops; 216 ctx->num_packets = num_fds * nr_loops;
217 ctx->in_fds[0] = fds[0]; 217 ctx->in_fds[0] = fds[0];
218 ctx->in_fds[1] = fds[1]; 218 ctx->in_fds[1] = fds[1];
219 ctx->ready_out = ready_out; 219 ctx->ready_out = ready_out;
@@ -250,7 +250,7 @@ static const struct option options[] = {
250 OPT_BOOLEAN('t', "thread", &thread_mode, 250 OPT_BOOLEAN('t', "thread", &thread_mode,
251 "Be multi thread instead of multi process"), 251 "Be multi thread instead of multi process"),
252 OPT_UINTEGER('g', "group", &num_groups, "Specify number of groups"), 252 OPT_UINTEGER('g', "group", &num_groups, "Specify number of groups"),
253 OPT_UINTEGER('l', "loop", &loops, "Specify number of loops"), 253 OPT_UINTEGER('l', "nr_loops", &nr_loops, "Specify the number of loops to run (default: 100)"),
254 OPT_END() 254 OPT_END()
255}; 255};
256 256
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 8edc205ff9a7..2bf9b3fd9e61 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -211,7 +211,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
211 } 211 }
212 212
213 if (!objdump_path) { 213 if (!objdump_path) {
214 ret = perf_session_env__lookup_objdump(&session->header.env); 214 ret = perf_env__lookup_objdump(&session->header.env);
215 if (ret) 215 if (ret)
216 goto out; 216 goto out;
217 } 217 }
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index f67934d46d40..b17aed36ca16 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -36,7 +36,7 @@ struct bench {
36#ifdef HAVE_LIBNUMA_SUPPORT 36#ifdef HAVE_LIBNUMA_SUPPORT
37static struct bench numa_benchmarks[] = { 37static struct bench numa_benchmarks[] = {
38 { "mem", "Benchmark for NUMA workloads", bench_numa }, 38 { "mem", "Benchmark for NUMA workloads", bench_numa },
39 { "all", "Test all NUMA benchmarks", NULL }, 39 { "all", "Run all NUMA benchmarks", NULL },
40 { NULL, NULL, NULL } 40 { NULL, NULL, NULL }
41}; 41};
42#endif 42#endif
@@ -44,14 +44,14 @@ static struct bench numa_benchmarks[] = {
44static struct bench sched_benchmarks[] = { 44static struct bench sched_benchmarks[] = {
45 { "messaging", "Benchmark for scheduling and IPC", bench_sched_messaging }, 45 { "messaging", "Benchmark for scheduling and IPC", bench_sched_messaging },
46 { "pipe", "Benchmark for pipe() between two processes", bench_sched_pipe }, 46 { "pipe", "Benchmark for pipe() between two processes", bench_sched_pipe },
47 { "all", "Test all scheduler benchmarks", NULL }, 47 { "all", "Run all scheduler benchmarks", NULL },
48 { NULL, NULL, NULL } 48 { NULL, NULL, NULL }
49}; 49};
50 50
51static struct bench mem_benchmarks[] = { 51static struct bench mem_benchmarks[] = {
52 { "memcpy", "Benchmark for memcpy()", bench_mem_memcpy }, 52 { "memcpy", "Benchmark for memcpy() functions", bench_mem_memcpy },
53 { "memset", "Benchmark for memset() tests", bench_mem_memset }, 53 { "memset", "Benchmark for memset() functions", bench_mem_memset },
54 { "all", "Test all memory benchmarks", NULL }, 54 { "all", "Run all memory access benchmarks", NULL },
55 { NULL, NULL, NULL } 55 { NULL, NULL, NULL }
56}; 56};
57 57
@@ -62,7 +62,7 @@ static struct bench futex_benchmarks[] = {
62 { "requeue", "Benchmark for futex requeue calls", bench_futex_requeue }, 62 { "requeue", "Benchmark for futex requeue calls", bench_futex_requeue },
63 /* pi-futexes */ 63 /* pi-futexes */
64 { "lock-pi", "Benchmark for futex lock_pi calls", bench_futex_lock_pi }, 64 { "lock-pi", "Benchmark for futex lock_pi calls", bench_futex_lock_pi },
65 { "all", "Test all futex benchmarks", NULL }, 65 { "all", "Run all futex benchmarks", NULL },
66 { NULL, NULL, NULL } 66 { NULL, NULL, NULL }
67}; 67};
68 68
@@ -110,7 +110,7 @@ int bench_format = BENCH_FORMAT_DEFAULT;
110unsigned int bench_repeat = 10; /* default number of times to repeat the run */ 110unsigned int bench_repeat = 10; /* default number of times to repeat the run */
111 111
112static const struct option bench_options[] = { 112static const struct option bench_options[] = {
113 OPT_STRING('f', "format", &bench_format_str, "default", "Specify format style"), 113 OPT_STRING('f', "format", &bench_format_str, "default|simple", "Specify the output formatting style"),
114 OPT_UINTEGER('r', "repeat", &bench_repeat, "Specify amount of times to repeat the run"), 114 OPT_UINTEGER('r', "repeat", &bench_repeat, "Specify amount of times to repeat the run"),
115 OPT_END() 115 OPT_END()
116}; 116};
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 695ec5a50cf2..f4d62510acbb 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -61,8 +61,8 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
61 usage_with_options(evlist_usage, options); 61 usage_with_options(evlist_usage, options);
62 62
63 if (details.event_group && (details.verbose || details.freq)) { 63 if (details.event_group && (details.verbose || details.freq)) {
64 pr_err("--group option is not compatible with other options\n"); 64 usage_with_options_msg(evlist_usage, options,
65 usage_with_options(evlist_usage, options); 65 "--group option is not compatible with other options\n");
66 } 66 }
67 67
68 return __cmd_evlist(input_name, &details); 68 return __cmd_evlist(input_name, &details);
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 36486eade1ef..a7d588bf3cdd 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -463,7 +463,7 @@ int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
463 builtin_help_subcommands, builtin_help_usage, 0); 463 builtin_help_subcommands, builtin_help_usage, 0);
464 464
465 if (show_all) { 465 if (show_all) {
466 printf("\n usage: %s\n\n", perf_usage_string); 466 printf("\n Usage: %s\n\n", perf_usage_string);
467 list_commands("perf commands", &main_cmds, &other_cmds); 467 list_commands("perf commands", &main_cmds, &other_cmds);
468 printf(" %s\n\n", perf_more_info_string); 468 printf(" %s\n\n", perf_more_info_string);
469 return 0; 469 return 0;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index f62c49b35be0..0a945d2e8ca5 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -28,9 +28,11 @@ struct perf_inject {
28 bool build_ids; 28 bool build_ids;
29 bool sched_stat; 29 bool sched_stat;
30 bool have_auxtrace; 30 bool have_auxtrace;
31 bool strip;
31 const char *input_name; 32 const char *input_name;
32 struct perf_data_file output; 33 struct perf_data_file output;
33 u64 bytes_written; 34 u64 bytes_written;
35 u64 aux_id;
34 struct list_head samples; 36 struct list_head samples;
35 struct itrace_synth_opts itrace_synth_opts; 37 struct itrace_synth_opts itrace_synth_opts;
36}; 38};
@@ -176,6 +178,27 @@ static int perf_event__repipe(struct perf_tool *tool,
176 return perf_event__repipe_synth(tool, event); 178 return perf_event__repipe_synth(tool, event);
177} 179}
178 180
181static int perf_event__drop(struct perf_tool *tool __maybe_unused,
182 union perf_event *event __maybe_unused,
183 struct perf_sample *sample __maybe_unused,
184 struct machine *machine __maybe_unused)
185{
186 return 0;
187}
188
189static int perf_event__drop_aux(struct perf_tool *tool,
190 union perf_event *event __maybe_unused,
191 struct perf_sample *sample,
192 struct machine *machine __maybe_unused)
193{
194 struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
195
196 if (!inject->aux_id)
197 inject->aux_id = sample->id;
198
199 return 0;
200}
201
179typedef int (*inject_handler)(struct perf_tool *tool, 202typedef int (*inject_handler)(struct perf_tool *tool,
180 union perf_event *event, 203 union perf_event *event,
181 struct perf_sample *sample, 204 struct perf_sample *sample,
@@ -466,6 +489,78 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
466 return 0; 489 return 0;
467} 490}
468 491
492static int drop_sample(struct perf_tool *tool __maybe_unused,
493 union perf_event *event __maybe_unused,
494 struct perf_sample *sample __maybe_unused,
495 struct perf_evsel *evsel __maybe_unused,
496 struct machine *machine __maybe_unused)
497{
498 return 0;
499}
500
501static void strip_init(struct perf_inject *inject)
502{
503 struct perf_evlist *evlist = inject->session->evlist;
504 struct perf_evsel *evsel;
505
506 inject->tool.context_switch = perf_event__drop;
507
508 evlist__for_each(evlist, evsel)
509 evsel->handler = drop_sample;
510}
511
512static bool has_tracking(struct perf_evsel *evsel)
513{
514 return evsel->attr.mmap || evsel->attr.mmap2 || evsel->attr.comm ||
515 evsel->attr.task;
516}
517
518#define COMPAT_MASK (PERF_SAMPLE_ID | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
519 PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
520
521/*
522 * So that the perf.data file stays parsable, tracking events like MMAP need
523 * their selected event to exist, unless there is only one selected event left
524 * and it has a compatible sample type.
525 */
526static bool ok_to_remove(struct perf_evlist *evlist,
527 struct perf_evsel *evsel_to_remove)
528{
529 struct perf_evsel *evsel;
530 int cnt = 0;
531 bool ok = false;
532
533 if (!has_tracking(evsel_to_remove))
534 return true;
535
536 evlist__for_each(evlist, evsel) {
537 if (evsel->handler != drop_sample) {
538 cnt += 1;
539 if ((evsel->attr.sample_type & COMPAT_MASK) ==
540 (evsel_to_remove->attr.sample_type & COMPAT_MASK))
541 ok = true;
542 }
543 }
544
545 return ok && cnt == 1;
546}
547
548static void strip_fini(struct perf_inject *inject)
549{
550 struct perf_evlist *evlist = inject->session->evlist;
551 struct perf_evsel *evsel, *tmp;
552
553 /* Remove non-synthesized evsels if possible */
554 evlist__for_each_safe(evlist, tmp, evsel) {
555 if (evsel->handler == drop_sample &&
556 ok_to_remove(evlist, evsel)) {
557 pr_debug("Deleting %s\n", perf_evsel__name(evsel));
558 perf_evlist__remove(evlist, evsel);
559 perf_evsel__delete(evsel);
560 }
561 }
562}
563
469static int __cmd_inject(struct perf_inject *inject) 564static int __cmd_inject(struct perf_inject *inject)
470{ 565{
471 int ret = -EINVAL; 566 int ret = -EINVAL;
@@ -512,10 +607,14 @@ static int __cmd_inject(struct perf_inject *inject)
512 inject->tool.id_index = perf_event__repipe_id_index; 607 inject->tool.id_index = perf_event__repipe_id_index;
513 inject->tool.auxtrace_info = perf_event__process_auxtrace_info; 608 inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
514 inject->tool.auxtrace = perf_event__process_auxtrace; 609 inject->tool.auxtrace = perf_event__process_auxtrace;
610 inject->tool.aux = perf_event__drop_aux;
611 inject->tool.itrace_start = perf_event__drop_aux;
515 inject->tool.ordered_events = true; 612 inject->tool.ordered_events = true;
516 inject->tool.ordering_requires_timestamps = true; 613 inject->tool.ordering_requires_timestamps = true;
517 /* Allow space in the header for new attributes */ 614 /* Allow space in the header for new attributes */
518 output_data_offset = 4096; 615 output_data_offset = 4096;
616 if (inject->strip)
617 strip_init(inject);
519 } 618 }
520 619
521 if (!inject->itrace_synth_opts.set) 620 if (!inject->itrace_synth_opts.set)
@@ -535,11 +634,28 @@ static int __cmd_inject(struct perf_inject *inject)
535 } 634 }
536 /* 635 /*
537 * The AUX areas have been removed and replaced with 636 * The AUX areas have been removed and replaced with
538 * synthesized hardware events, so clear the feature flag. 637 * synthesized hardware events, so clear the feature flag and
638 * remove the evsel.
539 */ 639 */
540 if (inject->itrace_synth_opts.set) 640 if (inject->itrace_synth_opts.set) {
641 struct perf_evsel *evsel;
642
541 perf_header__clear_feat(&session->header, 643 perf_header__clear_feat(&session->header,
542 HEADER_AUXTRACE); 644 HEADER_AUXTRACE);
645 if (inject->itrace_synth_opts.last_branch)
646 perf_header__set_feat(&session->header,
647 HEADER_BRANCH_STACK);
648 evsel = perf_evlist__id2evsel_strict(session->evlist,
649 inject->aux_id);
650 if (evsel) {
651 pr_debug("Deleting %s\n",
652 perf_evsel__name(evsel));
653 perf_evlist__remove(session->evlist, evsel);
654 perf_evsel__delete(evsel);
655 }
656 if (inject->strip)
657 strip_fini(inject);
658 }
543 session->header.data_offset = output_data_offset; 659 session->header.data_offset = output_data_offset;
544 session->header.data_size = inject->bytes_written; 660 session->header.data_size = inject->bytes_written;
545 perf_session__write_header(session, session->evlist, fd, true); 661 perf_session__write_header(session, session->evlist, fd, true);
@@ -604,6 +720,8 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
604 OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts, 720 OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
605 NULL, "opts", "Instruction Tracing options", 721 NULL, "opts", "Instruction Tracing options",
606 itrace_parse_synth_opts), 722 itrace_parse_synth_opts),
723 OPT_BOOLEAN(0, "strip", &inject.strip,
724 "strip non-synthesized events (use with --itrace)"),
607 OPT_END() 725 OPT_END()
608 }; 726 };
609 const char * const inject_usage[] = { 727 const char * const inject_usage[] = {
@@ -619,6 +737,11 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
619 if (argc) 737 if (argc)
620 usage_with_options(inject_usage, options); 738 usage_with_options(inject_usage, options);
621 739
740 if (inject.strip && !inject.itrace_synth_opts.set) {
741 pr_err("--strip option requires --itrace option\n");
742 return -1;
743 }
744
622 if (perf_data_file__open(&inject.output)) { 745 if (perf_data_file__open(&inject.output)) {
623 perror("failed to create output file"); 746 perror("failed to create output file");
624 return -1; 747 return -1;
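
A small standalone sketch of the compatibility rule that ok_to_remove() applies above: a tracking evsel may only be dropped when exactly one evsel survives and its attr.sample_type agrees on the COMPAT_MASK bits. The SAMPLE_* flags below are local stand-ins for the sketch, not the real PERF_SAMPLE_* values:

    #include <stdbool.h>
    #include <stdint.h>

    #define SAMPLE_ID         (UINT64_C(1) << 0)  /* stand-ins for PERF_SAMPLE_* */
    #define SAMPLE_TID        (UINT64_C(1) << 1)
    #define SAMPLE_TIME       (UINT64_C(1) << 2)
    #define SAMPLE_CPU        (UINT64_C(1) << 3)
    #define SAMPLE_IDENTIFIER (UINT64_C(1) << 4)

    #define COMPAT_MASK (SAMPLE_ID | SAMPLE_TID | SAMPLE_TIME | \
    		     SAMPLE_CPU | SAMPLE_IDENTIFIER)

    /* True when the surviving evsel's sample_type can stand in for the removed one. */
    static bool sample_types_compatible(uint64_t kept, uint64_t removed)
    {
    	return (kept & COMPAT_MASK) == (removed & COMPAT_MASK);
    }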
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 23b1faaaa4cc..93ce665f976f 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -329,7 +329,7 @@ static int build_alloc_func_list(void)
329 return -EINVAL; 329 return -EINVAL;
330 } 330 }
331 331
332 kernel_map = machine->vmlinux_maps[MAP__FUNCTION]; 332 kernel_map = machine__kernel_map(machine);
333 if (map__load(kernel_map, NULL) < 0) { 333 if (map__load(kernel_map, NULL) < 0) {
334 pr_err("cannot load kernel map\n"); 334 pr_err("cannot load kernel map\n");
335 return -ENOENT; 335 return -ENOENT;
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index fc1cffb1b7a2..dd94b4ca2213 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -13,7 +13,6 @@
13#include "util/parse-options.h" 13#include "util/parse-options.h"
14#include "util/trace-event.h" 14#include "util/trace-event.h"
15#include "util/debug.h" 15#include "util/debug.h"
16#include <api/fs/debugfs.h>
17#include "util/tool.h" 16#include "util/tool.h"
18#include "util/stat.h" 17#include "util/stat.h"
19#include "util/top.h" 18#include "util/top.h"
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index af5bd0514108..bf679e2c978b 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -36,7 +36,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
36 36
37 setup_pager(); 37 setup_pager();
38 38
39 if (!raw_dump) 39 if (!raw_dump && pager_in_use())
40 printf("\nList of pre-defined events (to be used in -e):\n\n"); 40 printf("\nList of pre-defined events (to be used in -e):\n\n");
41 41
42 if (argc == 0) { 42 if (argc == 0) {
@@ -45,6 +45,8 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
45 } 45 }
46 46
47 for (i = 0; i < argc; ++i) { 47 for (i = 0; i < argc; ++i) {
48 char *sep, *s;
49
48 if (strcmp(argv[i], "tracepoint") == 0) 50 if (strcmp(argv[i], "tracepoint") == 0)
49 print_tracepoint_events(NULL, NULL, raw_dump); 51 print_tracepoint_events(NULL, NULL, raw_dump);
50 else if (strcmp(argv[i], "hw") == 0 || 52 else if (strcmp(argv[i], "hw") == 0 ||
@@ -60,8 +62,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
60 print_hwcache_events(NULL, raw_dump); 62 print_hwcache_events(NULL, raw_dump);
61 else if (strcmp(argv[i], "pmu") == 0) 63 else if (strcmp(argv[i], "pmu") == 0)
62 print_pmu_events(NULL, raw_dump); 64 print_pmu_events(NULL, raw_dump);
63 else { 65 else if ((sep = strchr(argv[i], ':')) != NULL) {
64 char *sep = strchr(argv[i], ':'), *s;
65 int sep_idx; 66 int sep_idx;
66 67
67 if (sep == NULL) { 68 if (sep == NULL) {
@@ -76,6 +77,19 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
76 s[sep_idx] = '\0'; 77 s[sep_idx] = '\0';
77 print_tracepoint_events(s, s + sep_idx + 1, raw_dump); 78 print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
78 free(s); 79 free(s);
80 } else {
81 if (asprintf(&s, "*%s*", argv[i]) < 0) {
82 printf("Critical: Not enough memory! Trying to continue...\n");
83 continue;
84 }
85 print_symbol_events(s, PERF_TYPE_HARDWARE,
86 event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
87 print_symbol_events(s, PERF_TYPE_SOFTWARE,
88 event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
89 print_hwcache_events(s, raw_dump);
90 print_pmu_events(s, raw_dump);
91 print_tracepoint_events(NULL, s, raw_dump);
92 free(s);
79 } 93 }
80 } 94 }
81 return 0; 95 return 0;
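
A self-contained sketch of the wildcard handling cmd_list() gains above: any argument that is not a known event class and contains no ':' is wrapped as "*<arg>*" and matched against event names. The assumption here is that the print_* helpers glob-match their filter string; event_glob_matches() below is illustrative only, not a perf helper:

    #define _GNU_SOURCE     /* for asprintf() */
    #include <fnmatch.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int event_glob_matches(const char *arg, const char *event_name)
    {
    	char *pat;
    	int ret;

    	/* Build "*<arg>*" the way cmd_list() now does. */
    	if (asprintf(&pat, "*%s*", arg) < 0)
    		return 0;
    	ret = !fnmatch(pat, event_name, 0);
    	free(pat);
    	return ret;
    }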
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index b81cec33b4b2..132afc97676c 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -37,10 +37,10 @@
37#include "util/strfilter.h" 37#include "util/strfilter.h"
38#include "util/symbol.h" 38#include "util/symbol.h"
39#include "util/debug.h" 39#include "util/debug.h"
40#include <api/fs/debugfs.h>
41#include "util/parse-options.h" 40#include "util/parse-options.h"
42#include "util/probe-finder.h" 41#include "util/probe-finder.h"
43#include "util/probe-event.h" 42#include "util/probe-event.h"
43#include "util/probe-file.h"
44 44
45#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*" 45#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
46#define DEFAULT_FUNC_FILTER "!_*" 46#define DEFAULT_FUNC_FILTER "!_*"
@@ -182,10 +182,8 @@ static int opt_set_target(const struct option *opt, const char *str,
182 if (str) { 182 if (str) {
183 if (!strcmp(opt->long_name, "exec")) 183 if (!strcmp(opt->long_name, "exec"))
184 params.uprobes = true; 184 params.uprobes = true;
185#ifdef HAVE_DWARF_SUPPORT
186 else if (!strcmp(opt->long_name, "module")) 185 else if (!strcmp(opt->long_name, "module"))
187 params.uprobes = false; 186 params.uprobes = false;
188#endif
189 else 187 else
190 return ret; 188 return ret;
191 189
@@ -311,6 +309,119 @@ static void pr_err_with_code(const char *msg, int err)
311 pr_err("\n"); 309 pr_err("\n");
312} 310}
313 311
312static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
313{
314 int ret;
315 int i, k;
316 const char *event = NULL, *group = NULL;
317
318 ret = init_probe_symbol_maps(pevs->uprobes);
319 if (ret < 0)
320 return ret;
321
322 ret = convert_perf_probe_events(pevs, npevs);
323 if (ret < 0)
324 goto out_cleanup;
325
326 ret = apply_perf_probe_events(pevs, npevs);
327 if (ret < 0)
328 goto out_cleanup;
329
330 for (i = k = 0; i < npevs; i++)
331 k += pevs[i].ntevs;
332
333 pr_info("Added new event%s\n", (k > 1) ? "s:" : ":");
334 for (i = 0; i < npevs; i++) {
335 struct perf_probe_event *pev = &pevs[i];
336
337 for (k = 0; k < pev->ntevs; k++) {
338 struct probe_trace_event *tev = &pev->tevs[k];
339
340 /* We use tev's name for showing new events */
341 show_perf_probe_event(tev->group, tev->event, pev,
342 tev->point.module, false);
343
344 /* Save the last valid name */
345 event = tev->event;
346 group = tev->group;
347 }
348 }
349
350 /* Note that it is possible to skip all events because of blacklist */
351 if (event) {
352 /* Show how to use the event. */
353 pr_info("\nYou can now use it in all perf tools, such as:\n\n");
354 pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event);
355 }
356
357out_cleanup:
358 cleanup_perf_probe_events(pevs, npevs);
359 exit_probe_symbol_maps();
360 return ret;
361}
362
363static int perf_del_probe_events(struct strfilter *filter)
364{
365 int ret, ret2, ufd = -1, kfd = -1;
366 char *str = strfilter__string(filter);
367 struct strlist *klist = NULL, *ulist = NULL;
368 struct str_node *ent;
369
370 if (!str)
371 return -EINVAL;
372
373 pr_debug("Delete filter: \'%s\'\n", str);
374
375 /* Get current event names */
376 ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
377 if (ret < 0)
378 goto out;
379
380 klist = strlist__new(NULL, NULL);
381 ulist = strlist__new(NULL, NULL);
382 if (!klist || !ulist) {
383 ret = -ENOMEM;
384 goto out;
385 }
386
387 ret = probe_file__get_events(kfd, filter, klist);
388 if (ret == 0) {
389 strlist__for_each(ent, klist)
390 pr_info("Removed event: %s\n", ent->s);
391
392 ret = probe_file__del_strlist(kfd, klist);
393 if (ret < 0)
394 goto error;
395 }
396
397 ret2 = probe_file__get_events(ufd, filter, ulist);
398 if (ret2 == 0) {
399 strlist__for_each(ent, ulist)
400 pr_info("Removed event: %s\n", ent->s);
401
402 ret2 = probe_file__del_strlist(ufd, ulist);
403 if (ret2 < 0)
404 goto error;
405 }
406
407 if (ret == -ENOENT && ret2 == -ENOENT)
408 pr_debug("\"%s\" does not hit any event.\n", str);
409 /* Note that this is silently ignored */
410 ret = 0;
411
412error:
413 if (kfd >= 0)
414 close(kfd);
415 if (ufd >= 0)
416 close(ufd);
417out:
418 strlist__delete(klist);
419 strlist__delete(ulist);
420 free(str);
421
422 return ret;
423}
424
314static int 425static int
315__cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) 426__cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
316{ 427{
@@ -377,9 +488,6 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
377 "file", "vmlinux pathname"), 488 "file", "vmlinux pathname"),
378 OPT_STRING('s', "source", &symbol_conf.source_prefix, 489 OPT_STRING('s', "source", &symbol_conf.source_prefix,
379 "directory", "path to kernel source"), 490 "directory", "path to kernel source"),
380 OPT_CALLBACK('m', "module", NULL, "modname|path",
381 "target module name (for online) or path (for offline)",
382 opt_set_target),
383 OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines, 491 OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines,
384 "Don't search inlined functions"), 492 "Don't search inlined functions"),
385#endif 493#endif
@@ -396,6 +504,9 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
396 opt_set_filter), 504 opt_set_filter),
397 OPT_CALLBACK('x', "exec", NULL, "executable|path", 505 OPT_CALLBACK('x', "exec", NULL, "executable|path",
398 "target executable name or path", opt_set_target), 506 "target executable name or path", opt_set_target),
507 OPT_CALLBACK('m', "module", NULL, "modname|path",
508 "target module name (for online) or path (for offline)",
509 opt_set_target),
399 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, 510 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
400 "Enable symbol demangling"), 511 "Enable symbol demangling"),
401 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 512 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
@@ -417,12 +528,12 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
417 PARSE_OPT_STOP_AT_NON_OPTION); 528 PARSE_OPT_STOP_AT_NON_OPTION);
418 if (argc > 0) { 529 if (argc > 0) {
419 if (strcmp(argv[0], "-") == 0) { 530 if (strcmp(argv[0], "-") == 0) {
420 pr_warning(" Error: '-' is not supported.\n"); 531 usage_with_options_msg(probe_usage, options,
421 usage_with_options(probe_usage, options); 532 "'-' is not supported.\n");
422 } 533 }
423 if (params.command && params.command != 'a') { 534 if (params.command && params.command != 'a') {
424 pr_warning(" Error: another command except --add is set.\n"); 535 usage_with_options_msg(probe_usage, options,
425 usage_with_options(probe_usage, options); 536 "another command except --add is set.\n");
426 } 537 }
427 ret = parse_probe_event_argv(argc, argv); 538 ret = parse_probe_event_argv(argc, argv);
428 if (ret < 0) { 539 if (ret < 0) {
@@ -451,8 +562,10 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
451 switch (params.command) { 562 switch (params.command) {
452 case 'l': 563 case 'l':
453 if (params.uprobes) { 564 if (params.uprobes) {
454 pr_warning(" Error: Don't use --list with --exec.\n"); 565 pr_err(" Error: Don't use --list with --exec.\n");
455 usage_with_options(probe_usage, options); 566 parse_options_usage(probe_usage, options, "l", true);
567 parse_options_usage(NULL, options, "x", true);
568 return -EINVAL;
456 } 569 }
457 ret = show_perf_probe_events(params.filter); 570 ret = show_perf_probe_events(params.filter);
458 if (ret < 0) 571 if (ret < 0)
@@ -483,7 +596,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
483 return ret; 596 return ret;
484#endif 597#endif
485 case 'd': 598 case 'd':
486 ret = del_perf_probe_events(params.filter); 599 ret = perf_del_probe_events(params.filter);
487 if (ret < 0) { 600 if (ret < 0) {
488 pr_err_with_code(" Error: Failed to delete events.", ret); 601 pr_err_with_code(" Error: Failed to delete events.", ret);
489 return ret; 602 return ret;
@@ -492,11 +605,13 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
492 case 'a': 605 case 'a':
493 /* Ensure the last given target is used */ 606 /* Ensure the last given target is used */
494 if (params.target && !params.target_used) { 607 if (params.target && !params.target_used) {
495 pr_warning(" Error: -x/-m must follow the probe definitions.\n"); 608 pr_err(" Error: -x/-m must follow the probe definitions.\n");
496 usage_with_options(probe_usage, options); 609 parse_options_usage(probe_usage, options, "m", true);
610 parse_options_usage(NULL, options, "x", true);
611 return -EINVAL;
497 } 612 }
498 613
499 ret = add_perf_probe_events(params.events, params.nevents); 614 ret = perf_add_probe_events(params.events, params.nevents);
500 if (ret < 0) { 615 if (ret < 0) {
501 pr_err_with_code(" Error: Failed to add events.", ret); 616 pr_err_with_code(" Error: Failed to add events.", ret);
502 return ret; 617 return ret;
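
[Note] perf_del_probe_events() above walks the kprobe and uprobe trace files separately and only reports "does not hit any event" when both lookups return -ENOENT; a miss is deliberately not treated as a hard error. A minimal sketch of that control flow, with del_from_list() as a hypothetical stand-in for probe_file__get_events()/probe_file__del_strlist():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the probe_file__* helpers:
     * returns 0 on success, -ENOENT when the filter matches nothing. */
    static int del_from_list(const char *which, const char *filter)
    {
        (void)filter;   /* a real implementation would match against this */
        /* pretend only the kernel probe list has a matching entry */
        return strcmp(which, "kprobes") == 0 ? 0 : -ENOENT;
    }

    int main(void)
    {
        const char *filter = "vfs_*";
        int kret = del_from_list("kprobes", filter);
        int uret = del_from_list("uprobes", filter);

        if (kret == -ENOENT && uret == -ENOENT)
            fprintf(stderr, "\"%s\" does not hit any event\n", filter);

        /* as in perf_del_probe_events(), a miss is reported but not fatal */
        return 0;
    }
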
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 142eeb341b29..199fc31e3919 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -31,6 +31,7 @@
31#include "util/auxtrace.h" 31#include "util/auxtrace.h"
32#include "util/parse-branch-options.h" 32#include "util/parse-branch-options.h"
33#include "util/parse-regs-options.h" 33#include "util/parse-regs-options.h"
34#include "util/llvm-utils.h"
34 35
35#include <unistd.h> 36#include <unistd.h>
36#include <sched.h> 37#include <sched.h>
@@ -49,7 +50,7 @@ struct record {
49 int realtime_prio; 50 int realtime_prio;
50 bool no_buildid; 51 bool no_buildid;
51 bool no_buildid_cache; 52 bool no_buildid_cache;
52 long samples; 53 unsigned long long samples;
53}; 54};
54 55
55static int record__write(struct record *rec, void *bf, size_t size) 56static int record__write(struct record *rec, void *bf, size_t size)
@@ -636,8 +637,29 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
636 /* 637 /*
637 * Let the child rip 638 * Let the child rip
638 */ 639 */
639 if (forks) 640 if (forks) {
641 union perf_event *event;
642
643 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
644 if (event == NULL) {
645 err = -ENOMEM;
646 goto out_child;
647 }
648
649 /*
650 * Some H/W events are generated before COMM event
651 * which is emitted during exec(), so perf script
652 * cannot see a correct process name for those events.
653 * Synthesize COMM event to prevent it.
654 */
655 perf_event__synthesize_comm(tool, event,
656 rec->evlist->workload.pid,
657 process_synthesized_event,
658 machine);
659 free(event);
660
640 perf_evlist__start_workload(rec->evlist); 661 perf_evlist__start_workload(rec->evlist);
662 }
641 663
642 if (opts->initial_delay) { 664 if (opts->initial_delay) {
643 usleep(opts->initial_delay * 1000); 665 usleep(opts->initial_delay * 1000);
@@ -646,7 +668,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
646 668
647 auxtrace_snapshot_enabled = 1; 669 auxtrace_snapshot_enabled = 1;
648 for (;;) { 670 for (;;) {
649 int hits = rec->samples; 671 unsigned long long hits = rec->samples;
650 672
651 if (record__mmap_read_all(rec) < 0) { 673 if (record__mmap_read_all(rec) < 0) {
652 auxtrace_snapshot_enabled = 0; 674 auxtrace_snapshot_enabled = 0;
@@ -989,13 +1011,8 @@ static struct record record = {
989 }, 1011 },
990}; 1012};
991 1013
992#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: " 1014const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
993 1015 "\n\t\t\t\tDefault: fp";
994#ifdef HAVE_DWARF_UNWIND_SUPPORT
995const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf lbr";
996#else
997const char record_callchain_help[] = CALLCHAIN_HELP "fp lbr";
998#endif
999 1016
1000/* 1017/*
1001 * XXX Will stay a global variable till we fix builtin-script.c to stop messing 1018 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
@@ -1043,7 +1060,7 @@ struct option __record_options[] = {
1043 NULL, "enables call-graph recording" , 1060 NULL, "enables call-graph recording" ,
1044 &record_callchain_opt), 1061 &record_callchain_opt),
1045 OPT_CALLBACK(0, "call-graph", &record.opts, 1062 OPT_CALLBACK(0, "call-graph", &record.opts,
1046 "mode[,dump_size]", record_callchain_help, 1063 "record_mode[,record_size]", record_callchain_help,
1047 &record_parse_callchain_opt), 1064 &record_parse_callchain_opt),
1048 OPT_INCR('v', "verbose", &verbose, 1065 OPT_INCR('v', "verbose", &verbose,
1049 "be more verbose (show counter open errors, etc)"), 1066 "be more verbose (show counter open errors, etc)"),
@@ -1096,6 +1113,12 @@ struct option __record_options[] = {
1096 "per thread proc mmap processing timeout in ms"), 1113 "per thread proc mmap processing timeout in ms"),
1097 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events, 1114 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1098 "Record context switch events"), 1115 "Record context switch events"),
1116#ifdef HAVE_LIBBPF_SUPPORT
1117 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1118 "clang binary to use for compiling BPF scriptlets"),
1119 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1120 "options passed to clang when compiling BPF scriptlets"),
1121#endif
1099 OPT_END() 1122 OPT_END()
1100}; 1123};
1101 1124
@@ -1119,14 +1142,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
1119 usage_with_options(record_usage, record_options); 1142 usage_with_options(record_usage, record_options);
1120 1143
1121 if (nr_cgroups && !rec->opts.target.system_wide) { 1144 if (nr_cgroups && !rec->opts.target.system_wide) {
1122 ui__error("cgroup monitoring only available in" 1145 usage_with_options_msg(record_usage, record_options,
1123 " system-wide mode\n"); 1146 "cgroup monitoring only available in system-wide mode");
1124 usage_with_options(record_usage, record_options); 1147
1125 } 1148 }
1126 if (rec->opts.record_switch_events && 1149 if (rec->opts.record_switch_events &&
1127 !perf_can_record_switch_events()) { 1150 !perf_can_record_switch_events()) {
1128 ui__error("kernel does not support recording context switch events (--switch-events option)\n"); 1151 ui__error("kernel does not support recording context switch events\n");
1129 usage_with_options(record_usage, record_options); 1152 parse_options_usage(record_usage, record_options, "switch-events", 0);
1153 return -EINVAL;
1130 } 1154 }
1131 1155
1132 if (!rec->itr) { 1156 if (!rec->itr) {
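
[Note] The forked-workload path above synthesizes a COMM event before the workload starts, sized as sizeof(event->comm) plus the per-session id header, so hardware samples that arrive before exec() still resolve to a process name. A minimal sketch of that union-member sizing pattern, with a simplified event layout (not the real perf_event ABI structs) and an assumed 32-byte id trailer:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified event layout; the real perf_event ABI structs are larger. */
    struct comm_event {
        unsigned int pid, tid;
        char comm[16];
    };

    union sample_event {
        struct comm_event comm;
        /* ...other record types elided... */
    };

    int main(void)
    {
        size_t id_hdr_size = 32;    /* per-session sample-id trailer, assumed */
        union sample_event *event;

        /* size for the member we will fill, plus room for the id trailer */
        event = malloc(sizeof(event->comm) + id_hdr_size);
        if (!event)
            return 1;

        memset(event, 0, sizeof(event->comm) + id_hdr_size);
        snprintf(event->comm.comm, sizeof(event->comm.comm), "%s", "my-workload");
        printf("synthesized COMM for %s\n", event->comm.comm);

        free(event);
        return 0;
    }
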
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 62b285e32aa5..2853ad2bd435 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -62,6 +62,7 @@ struct report {
62 float min_percent; 62 float min_percent;
63 u64 nr_entries; 63 u64 nr_entries;
64 u64 queue_size; 64 u64 queue_size;
65 int socket_filter;
65 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 66 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
66}; 67};
67 68
@@ -162,14 +163,21 @@ static int process_sample_event(struct perf_tool *tool,
162 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) 163 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
163 goto out_put; 164 goto out_put;
164 165
165 if (sort__mode == SORT_MODE__BRANCH) 166 if (sort__mode == SORT_MODE__BRANCH) {
167 /*
168 * A non-synthesized event might not have a branch stack if
169 * branch stacks have been synthesized (using itrace options).
170 */
171 if (!sample->branch_stack)
172 goto out_put;
166 iter.ops = &hist_iter_branch; 173 iter.ops = &hist_iter_branch;
167 else if (rep->mem_mode) 174 } else if (rep->mem_mode) {
168 iter.ops = &hist_iter_mem; 175 iter.ops = &hist_iter_mem;
169 else if (symbol_conf.cumulate_callchain) 176 } else if (symbol_conf.cumulate_callchain) {
170 iter.ops = &hist_iter_cumulative; 177 iter.ops = &hist_iter_cumulative;
171 else 178 } else {
172 iter.ops = &hist_iter_normal; 179 iter.ops = &hist_iter_normal;
180 }
173 181
174 if (al.map != NULL) 182 if (al.map != NULL)
175 al.map->dso->hit = 1; 183 al.map->dso->hit = 1;
@@ -213,6 +221,15 @@ static int report__setup_sample_type(struct report *rep)
213 u64 sample_type = perf_evlist__combined_sample_type(session->evlist); 221 u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
214 bool is_pipe = perf_data_file__is_pipe(session->file); 222 bool is_pipe = perf_data_file__is_pipe(session->file);
215 223
224 if (session->itrace_synth_opts->callchain ||
225 (!is_pipe &&
226 perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
227 !session->itrace_synth_opts->set))
228 sample_type |= PERF_SAMPLE_CALLCHAIN;
229
230 if (session->itrace_synth_opts->last_branch)
231 sample_type |= PERF_SAMPLE_BRANCH_STACK;
232
216 if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) { 233 if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
217 if (sort__has_parent) { 234 if (sort__has_parent) {
218 ui__error("Selected --sort parent, but no " 235 ui__error("Selected --sort parent, but no "
@@ -286,6 +303,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
286 struct perf_evsel *evsel = hists_to_evsel(hists); 303 struct perf_evsel *evsel = hists_to_evsel(hists);
287 char buf[512]; 304 char buf[512];
288 size_t size = sizeof(buf); 305 size_t size = sizeof(buf);
306 int socked_id = hists->socket_filter;
289 307
290 if (symbol_conf.filter_relative) { 308 if (symbol_conf.filter_relative) {
291 nr_samples = hists->stats.nr_non_filtered_samples; 309 nr_samples = hists->stats.nr_non_filtered_samples;
@@ -326,6 +344,10 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
326 ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order); 344 ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
327 } else 345 } else
328 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events); 346 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
347
348 if (socked_id > -1)
349 ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);
350
329 return ret + fprintf(fp, "\n#\n"); 351 return ret + fprintf(fp, "\n#\n");
330} 352}
331 353
@@ -365,7 +387,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
365 387
366static void report__warn_kptr_restrict(const struct report *rep) 388static void report__warn_kptr_restrict(const struct report *rep)
367{ 389{
368 struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION]; 390 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
369 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL; 391 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
370 392
371 if (kernel_map == NULL || 393 if (kernel_map == NULL ||
@@ -450,6 +472,8 @@ static void report__collapse_hists(struct report *rep)
450 if (pos->idx == 0) 472 if (pos->idx == 0)
451 hists->symbol_filter_str = rep->symbol_filter_str; 473 hists->symbol_filter_str = rep->symbol_filter_str;
452 474
475 hists->socket_filter = rep->socket_filter;
476
453 hists__collapse_resort(hists, &prog); 477 hists__collapse_resort(hists, &prog);
454 478
455 /* Non-group events are considered as leader */ 479 /* Non-group events are considered as leader */
@@ -601,6 +625,12 @@ parse_percent_limit(const struct option *opt, const char *str,
601 return 0; 625 return 0;
602} 626}
603 627
628#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function"
629
630const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
631 CALLCHAIN_REPORT_HELP
632 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
633
604int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) 634int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
605{ 635{
606 struct perf_session *session; 636 struct perf_session *session;
@@ -609,7 +639,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
609 bool has_br_stack = false; 639 bool has_br_stack = false;
610 int branch_mode = -1; 640 int branch_mode = -1;
611 bool branch_call_mode = false; 641 bool branch_call_mode = false;
612 char callchain_default_opt[] = "fractal,0.5,callee"; 642 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
613 const char * const report_usage[] = { 643 const char * const report_usage[] = {
614 "perf report [<options>]", 644 "perf report [<options>]",
615 NULL 645 NULL
@@ -635,6 +665,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
635 }, 665 },
636 .max_stack = PERF_MAX_STACK_DEPTH, 666 .max_stack = PERF_MAX_STACK_DEPTH,
637 .pretty_printing_style = "normal", 667 .pretty_printing_style = "normal",
668 .socket_filter = -1,
638 }; 669 };
639 const struct option options[] = { 670 const struct option options[] = {
640 OPT_STRING('i', "input", &input_name, "file", 671 OPT_STRING('i', "input", &input_name, "file",
@@ -668,15 +699,18 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
668 " Please refer the man page for the complete list."), 699 " Please refer the man page for the complete list."),
669 OPT_STRING('F', "fields", &field_order, "key[,keys...]", 700 OPT_STRING('F', "fields", &field_order, "key[,keys...]",
670 "output field(s): overhead, period, sample plus all of sort keys"), 701 "output field(s): overhead, period, sample plus all of sort keys"),
671 OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, 702 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
672 "Show sample percentage for different cpu modes"), 703 "Show sample percentage for different cpu modes"),
704 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
705 "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
673 OPT_STRING('p', "parent", &parent_pattern, "regex", 706 OPT_STRING('p', "parent", &parent_pattern, "regex",
674 "regex filter to identify parent, see: '--sort parent'"), 707 "regex filter to identify parent, see: '--sort parent'"),
675 OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, 708 OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
676 "Only display entries with parent-match"), 709 "Only display entries with parent-match"),
677 OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order[,branch]", 710 OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
678 "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address), add branches. " 711 "print_type,threshold[,print_limit],order,sort_key[,branch]",
679 "Default: fractal,0.5,callee,function", &report_parse_callchain_opt, callchain_default_opt), 712 report_callchain_help, &report_parse_callchain_opt,
713 callchain_default_opt),
680 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, 714 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
681 "Accumulate callchains of children and show total overhead as well"), 715 "Accumulate callchains of children and show total overhead as well"),
682 OPT_INTEGER(0, "max-stack", &report.max_stack, 716 OPT_INTEGER(0, "max-stack", &report.max_stack,
@@ -747,6 +781,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
747 "Show full source file name path for source lines"), 781 "Show full source file name path for source lines"),
748 OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph, 782 OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
749 "Show callgraph from reference event"), 783 "Show callgraph from reference event"),
784 OPT_INTEGER(0, "socket-filter", &report.socket_filter,
785 "only show processor socket that match with this filter"),
750 OPT_END() 786 OPT_END()
751 }; 787 };
752 struct perf_data_file file = { 788 struct perf_data_file file = {
@@ -781,6 +817,12 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
781 817
782 if (report.inverted_callchain) 818 if (report.inverted_callchain)
783 callchain_param.order = ORDER_CALLER; 819 callchain_param.order = ORDER_CALLER;
820 if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
821 callchain_param.order = ORDER_CALLER;
822
823 if (itrace_synth_opts.callchain &&
824 (int)itrace_synth_opts.callchain_sz > report.max_stack)
825 report.max_stack = itrace_synth_opts.callchain_sz;
784 826
785 if (!input_name || !strlen(input_name)) { 827 if (!input_name || !strlen(input_name)) {
786 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 828 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
@@ -809,6 +851,9 @@ repeat:
809 has_br_stack = perf_header__has_feat(&session->header, 851 has_br_stack = perf_header__has_feat(&session->header,
810 HEADER_BRANCH_STACK); 852 HEADER_BRANCH_STACK);
811 853
854 if (itrace_synth_opts.last_branch)
855 has_br_stack = true;
856
812 /* 857 /*
813 * Branch mode is a tristate: 858 * Branch mode is a tristate:
814 * -1 means default, so decide based on the file having branch data. 859 * -1 means default, so decide based on the file having branch data.
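
[Note] The new --socket-filter option and hists->socket_filter field restrict the report to samples whose CPU belongs to one processor socket (-1, the default set above, means no filtering). A minimal sketch of the idea, with a hard-coded cpu-to-socket table standing in for the topology map perf reads from the perf.data header:

    #include <stdio.h>

    /* Assumed topology: cpu -> physical socket id (perf gets this from the
     * perf.data header / sysfs instead of hard-coding it). */
    static const int cpu_to_socket[] = { 0, 0, 0, 0, 1, 1, 1, 1 };

    struct sample {
        int cpu;
        const char *sym;
    };

    int main(void)
    {
        struct sample samples[] = {
            { 1, "do_page_fault" }, { 5, "tcp_sendmsg" }, { 6, "memcpy" },
        };
        int socket_filter = 1;  /* as if run with --socket-filter 1 */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            if (socket_filter > -1 &&
                cpu_to_socket[samples[i].cpu] != socket_filter)
                continue;   /* dropped, like a filtered hist entry */
            printf("socket %d cpu %d %s\n",
                   cpu_to_socket[samples[i].cpu], samples[i].cpu, samples[i].sym);
        }
        return 0;
    }
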
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 33962612a5e9..0ee6d900e100 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1728,8 +1728,8 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options
1728 for (tok = strtok_r(str, ", ", &tmp); 1728 for (tok = strtok_r(str, ", ", &tmp);
1729 tok; tok = strtok_r(NULL, ", ", &tmp)) { 1729 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1730 if (sort_dimension__add(tok, &sched->sort_list) < 0) { 1730 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
1731 error("Unknown --sort key: `%s'", tok); 1731 usage_with_options_msg(usage_msg, options,
1732 usage_with_options(usage_msg, options); 1732 "Unknown --sort key: `%s'", tok);
1733 } 1733 }
1734 } 1734 }
1735 1735
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 284a76e04628..72b5deb4bd79 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -29,9 +29,12 @@ static bool no_callchain;
29static bool latency_format; 29static bool latency_format;
30static bool system_wide; 30static bool system_wide;
31static bool print_flags; 31static bool print_flags;
32static bool nanosecs;
32static const char *cpu_list; 33static const char *cpu_list;
33static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 34static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
34 35
36unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
37
35enum perf_output_field { 38enum perf_output_field {
36 PERF_OUTPUT_COMM = 1U << 0, 39 PERF_OUTPUT_COMM = 1U << 0,
37 PERF_OUTPUT_TID = 1U << 1, 40 PERF_OUTPUT_TID = 1U << 1,
@@ -48,6 +51,8 @@ enum perf_output_field {
48 PERF_OUTPUT_SRCLINE = 1U << 12, 51 PERF_OUTPUT_SRCLINE = 1U << 12,
49 PERF_OUTPUT_PERIOD = 1U << 13, 52 PERF_OUTPUT_PERIOD = 1U << 13,
50 PERF_OUTPUT_IREGS = 1U << 14, 53 PERF_OUTPUT_IREGS = 1U << 14,
54 PERF_OUTPUT_BRSTACK = 1U << 15,
55 PERF_OUTPUT_BRSTACKSYM = 1U << 16,
51}; 56};
52 57
53struct output_option { 58struct output_option {
@@ -69,6 +74,8 @@ struct output_option {
69 {.str = "srcline", .field = PERF_OUTPUT_SRCLINE}, 74 {.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
70 {.str = "period", .field = PERF_OUTPUT_PERIOD}, 75 {.str = "period", .field = PERF_OUTPUT_PERIOD},
71 {.str = "iregs", .field = PERF_OUTPUT_IREGS}, 76 {.str = "iregs", .field = PERF_OUTPUT_IREGS},
77 {.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
78 {.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
72}; 79};
73 80
74/* default set to maintain compatibility with current format */ 81/* default set to maintain compatibility with current format */
@@ -415,10 +422,84 @@ static void print_sample_start(struct perf_sample *sample,
415 secs = nsecs / NSECS_PER_SEC; 422 secs = nsecs / NSECS_PER_SEC;
416 nsecs -= secs * NSECS_PER_SEC; 423 nsecs -= secs * NSECS_PER_SEC;
417 usecs = nsecs / NSECS_PER_USEC; 424 usecs = nsecs / NSECS_PER_USEC;
418 printf("%5lu.%06lu: ", secs, usecs); 425 if (nanosecs)
426 printf("%5lu.%09llu: ", secs, nsecs);
427 else
428 printf("%5lu.%06lu: ", secs, usecs);
419 } 429 }
420} 430}
421 431
432static inline char
433mispred_str(struct branch_entry *br)
434{
435 if (!(br->flags.mispred || br->flags.predicted))
436 return '-';
437
438 return br->flags.predicted ? 'P' : 'M';
439}
440
441static void print_sample_brstack(union perf_event *event __maybe_unused,
442 struct perf_sample *sample,
443 struct thread *thread __maybe_unused,
444 struct perf_event_attr *attr __maybe_unused)
445{
446 struct branch_stack *br = sample->branch_stack;
447 u64 i;
448
449 if (!(br && br->nr))
450 return;
451
452 for (i = 0; i < br->nr; i++) {
453 printf(" 0x%"PRIx64"/0x%"PRIx64"/%c/%c/%c/%d ",
454 br->entries[i].from,
455 br->entries[i].to,
456 mispred_str( br->entries + i),
457 br->entries[i].flags.in_tx? 'X' : '-',
458 br->entries[i].flags.abort? 'A' : '-',
459 br->entries[i].flags.cycles);
460 }
461}
462
463static void print_sample_brstacksym(union perf_event *event __maybe_unused,
464 struct perf_sample *sample,
465 struct thread *thread __maybe_unused,
466 struct perf_event_attr *attr __maybe_unused)
467{
468 struct branch_stack *br = sample->branch_stack;
469 struct addr_location alf, alt;
470 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
471 u64 i, from, to;
472
473 if (!(br && br->nr))
474 return;
475
476 for (i = 0; i < br->nr; i++) {
477
478 memset(&alf, 0, sizeof(alf));
479 memset(&alt, 0, sizeof(alt));
480 from = br->entries[i].from;
481 to = br->entries[i].to;
482
483 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, from, &alf);
484 if (alf.map)
485 alf.sym = map__find_symbol(alf.map, alf.addr, NULL);
486
487 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, to, &alt);
488 if (alt.map)
489 alt.sym = map__find_symbol(alt.map, alt.addr, NULL);
490
491 symbol__fprintf_symname_offs(alf.sym, &alf, stdout);
492 putchar('/');
493 symbol__fprintf_symname_offs(alt.sym, &alt, stdout);
494 printf("/%c/%c/%c/%d ",
495 mispred_str( br->entries + i),
496 br->entries[i].flags.in_tx? 'X' : '-',
497 br->entries[i].flags.abort? 'A' : '-',
498 br->entries[i].flags.cycles);
499 }
500}
501
502
422static void print_sample_addr(union perf_event *event, 503static void print_sample_addr(union perf_event *event,
423 struct perf_sample *sample, 504 struct perf_sample *sample,
424 struct thread *thread, 505 struct thread *thread,
@@ -471,7 +552,7 @@ static void print_sample_bts(union perf_event *event,
471 } 552 }
472 } 553 }
473 perf_evsel__print_ip(evsel, sample, al, print_opts, 554 perf_evsel__print_ip(evsel, sample, al, print_opts,
474 PERF_MAX_STACK_DEPTH); 555 scripting_max_stack);
475 } 556 }
476 557
477 /* print branch_to information */ 558 /* print branch_to information */
@@ -548,12 +629,17 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
548 629
549 perf_evsel__print_ip(evsel, sample, al, 630 perf_evsel__print_ip(evsel, sample, al,
550 output[attr->type].print_ip_opts, 631 output[attr->type].print_ip_opts,
551 PERF_MAX_STACK_DEPTH); 632 scripting_max_stack);
552 } 633 }
553 634
554 if (PRINT_FIELD(IREGS)) 635 if (PRINT_FIELD(IREGS))
555 print_sample_iregs(event, sample, thread, attr); 636 print_sample_iregs(event, sample, thread, attr);
556 637
638 if (PRINT_FIELD(BRSTACK))
639 print_sample_brstack(event, sample, thread, attr);
640 else if (PRINT_FIELD(BRSTACKSYM))
641 print_sample_brstacksym(event, sample, thread, attr);
642
557 printf("\n"); 643 printf("\n");
558} 644}
559 645
@@ -680,7 +766,10 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
680 766
681 set_print_ip_opts(&evsel->attr); 767 set_print_ip_opts(&evsel->attr);
682 768
683 return perf_evsel__check_attr(evsel, scr->session); 769 if (evsel->attr.sample_type)
770 err = perf_evsel__check_attr(evsel, scr->session);
771
772 return err;
684} 773}
685 774
686static int process_comm_event(struct perf_tool *tool, 775static int process_comm_event(struct perf_tool *tool,
@@ -1672,7 +1761,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
1672 "comma separated output fields prepend with 'type:'. " 1761 "comma separated output fields prepend with 'type:'. "
1673 "Valid types: hw,sw,trace,raw. " 1762 "Valid types: hw,sw,trace,raw. "
1674 "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," 1763 "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
1675 "addr,symoff,period,iregs,flags", parse_output_fields), 1764 "addr,symoff,period,iregs,brstack,brstacksym,flags", parse_output_fields),
1676 OPT_BOOLEAN('a', "all-cpus", &system_wide, 1765 OPT_BOOLEAN('a', "all-cpus", &system_wide,
1677 "system-wide collection from all CPUs"), 1766 "system-wide collection from all CPUs"),
1678 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 1767 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
@@ -1695,6 +1784,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
1695 OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events, 1784 OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events,
1696 "Show context switch events (if recorded)"), 1785 "Show context switch events (if recorded)"),
1697 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), 1786 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
1787 OPT_BOOLEAN(0, "ns", &nanosecs,
1788 "Use 9 decimal places when displaying time"),
1698 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts", 1789 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
1699 "Instruction Tracing options", 1790 "Instruction Tracing options",
1700 itrace_parse_synth_opts), 1791 itrace_parse_synth_opts),
@@ -1740,6 +1831,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
1740 } 1831 }
1741 } 1832 }
1742 1833
1834 if (itrace_synth_opts.callchain &&
1835 itrace_synth_opts.callchain_sz > scripting_max_stack)
1836 scripting_max_stack = itrace_synth_opts.callchain_sz;
1837
1743 /* make sure PERF_EXEC_PATH is set for scripts */ 1838 /* make sure PERF_EXEC_PATH is set for scripts */
1744 perf_set_argv_exec_path(perf_exec_path()); 1839 perf_set_argv_exec_path(perf_exec_path());
1745 1840
@@ -1752,9 +1847,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
1752 rep_script_path = get_script_path(argv[0], REPORT_SUFFIX); 1847 rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
1753 1848
1754 if (!rec_script_path && !rep_script_path) { 1849 if (!rec_script_path && !rep_script_path) {
1755 fprintf(stderr, " Couldn't find script %s\n\n See perf" 1850 usage_with_options_msg(script_usage, options,
1851 "Couldn't find script `%s'\n\n See perf"
1756 " script -l for available scripts.\n", argv[0]); 1852 " script -l for available scripts.\n", argv[0]);
1757 usage_with_options(script_usage, options);
1758 } 1853 }
1759 1854
1760 if (is_top_script(argv[0])) { 1855 if (is_top_script(argv[0])) {
@@ -1765,10 +1860,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
1765 rep_args = has_required_arg(rep_script_path); 1860 rep_args = has_required_arg(rep_script_path);
1766 rec_args = (argc - 1) - rep_args; 1861 rec_args = (argc - 1) - rep_args;
1767 if (rec_args < 0) { 1862 if (rec_args < 0) {
1768 fprintf(stderr, " %s script requires options." 1863 usage_with_options_msg(script_usage, options,
1864 "`%s' script requires options."
1769 "\n\n See perf script -l for available " 1865 "\n\n See perf script -l for available "
1770 "scripts and options.\n", argv[0]); 1866 "scripts and options.\n", argv[0]);
1771 usage_with_options(script_usage, options);
1772 } 1867 }
1773 } 1868 }
1774 1869
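
[Note] The new brstack output field prints each sampled branch as from/to/mispredict/in_tx/abort/cycles, with '-' where no prediction information was recorded; brstacksym uses the same layout with the addresses resolved to symbols. A small standalone sketch of that encoding, using a simplified branch_entry instead of the real perf_event.h definitions:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Simplified branch entry; the real layout lives in the perf_event ABI. */
    struct branch_flags {
        unsigned int mispred:1, predicted:1, in_tx:1, abort:1, cycles:16;
    };

    struct branch_entry {
        uint64_t from, to;
        struct branch_flags flags;
    };

    static char mispred_char(const struct branch_flags *f)
    {
        if (!(f->mispred || f->predicted))
            return '-';     /* no prediction information recorded */
        return f->predicted ? 'P' : 'M';
    }

    int main(void)
    {
        struct branch_entry br = {
            .from  = 0xffffffff8104f45aULL,
            .to    = 0xffffffff8104f3deULL,
            .flags = { .mispred = 1, .cycles = 11 },
        };

        /* same from/to/mispred/in_tx/abort/cycles layout as the brstack field */
        printf("0x%" PRIx64 "/0x%" PRIx64 "/%c/%c/%c/%d\n",
               br.from, br.to, mispred_char(&br.flags),
               br.flags.in_tx ? 'X' : '-',
               br.flags.abort ? 'A' : '-',
               br.flags.cycles);
        return 0;
    }

Typical usage would be something along the lines of recording with branch sampling (`perf record -b`) and then selecting the field with `perf script --fields ip,brstack`.
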
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d46dbb1bc65d..2f438f76cceb 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -100,6 +100,8 @@ static struct target target = {
100 .uid = UINT_MAX, 100 .uid = UINT_MAX,
101}; 101};
102 102
103typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
104
103static int run_count = 1; 105static int run_count = 1;
104static bool no_inherit = false; 106static bool no_inherit = false;
105static volatile pid_t child_pid = -1; 107static volatile pid_t child_pid = -1;
@@ -119,7 +121,7 @@ static unsigned int unit_width = 4; /* strlen("unit") */
119static bool forever = false; 121static bool forever = false;
120static struct timespec ref_time; 122static struct timespec ref_time;
121static struct cpu_map *aggr_map; 123static struct cpu_map *aggr_map;
122static int (*aggr_get_id)(struct cpu_map *m, int cpu); 124static aggr_get_id_t aggr_get_id;
123 125
124static volatile int done = 0; 126static volatile int done = 0;
125 127
@@ -215,7 +217,7 @@ static void read_counters(bool close_counters)
215 217
216 evlist__for_each(evsel_list, counter) { 218 evlist__for_each(evsel_list, counter) {
217 if (read_counter(counter)) 219 if (read_counter(counter))
218 pr_warning("failed to read counter %s\n", counter->name); 220 pr_debug("failed to read counter %s\n", counter->name);
219 221
220 if (perf_stat_process_counter(&stat_config, counter)) 222 if (perf_stat_process_counter(&stat_config, counter))
221 pr_warning("failed to process counter %s\n", counter->name); 223 pr_warning("failed to process counter %s\n", counter->name);
@@ -434,7 +436,7 @@ static void print_noise_pct(double total, double avg)
434 436
435static void print_noise(struct perf_evsel *evsel, double avg) 437static void print_noise(struct perf_evsel *evsel, double avg)
436{ 438{
437 struct perf_stat *ps; 439 struct perf_stat_evsel *ps;
438 440
439 if (run_count == 1) 441 if (run_count == 1)
440 return; 442 return;
@@ -479,6 +481,7 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
479 csv_sep); 481 csv_sep);
480 break; 482 break;
481 case AGGR_GLOBAL: 483 case AGGR_GLOBAL:
484 case AGGR_UNSET:
482 default: 485 default:
483 break; 486 break;
484 } 487 }
@@ -671,7 +674,7 @@ static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
671static void print_counter_aggr(struct perf_evsel *counter, char *prefix) 674static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
672{ 675{
673 FILE *output = stat_config.output; 676 FILE *output = stat_config.output;
674 struct perf_stat *ps = counter->priv; 677 struct perf_stat_evsel *ps = counter->priv;
675 double avg = avg_stats(&ps->res_stats[0]); 678 double avg = avg_stats(&ps->res_stats[0]);
676 int scaled = counter->counts->scaled; 679 int scaled = counter->counts->scaled;
677 double uval; 680 double uval;
@@ -799,6 +802,8 @@ static void print_interval(char *prefix, struct timespec *ts)
799 case AGGR_GLOBAL: 802 case AGGR_GLOBAL:
800 default: 803 default:
801 fprintf(output, "# time counts %*s events\n", unit_width, "unit"); 804 fprintf(output, "# time counts %*s events\n", unit_width, "unit");
805 case AGGR_UNSET:
806 break;
802 } 807 }
803 } 808 }
804 809
@@ -880,6 +885,7 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
880 evlist__for_each(evsel_list, counter) 885 evlist__for_each(evsel_list, counter)
881 print_counter(counter, prefix); 886 print_counter(counter, prefix);
882 break; 887 break;
888 case AGGR_UNSET:
883 default: 889 default:
884 break; 890 break;
885 } 891 }
@@ -940,30 +946,90 @@ static int stat__set_big_num(const struct option *opt __maybe_unused,
940 return 0; 946 return 0;
941} 947}
942 948
949static int perf_stat__get_socket(struct cpu_map *map, int cpu)
950{
951 return cpu_map__get_socket(map, cpu, NULL);
952}
953
954static int perf_stat__get_core(struct cpu_map *map, int cpu)
955{
956 return cpu_map__get_core(map, cpu, NULL);
957}
958
959static int cpu_map__get_max(struct cpu_map *map)
960{
961 int i, max = -1;
962
963 for (i = 0; i < map->nr; i++) {
964 if (map->map[i] > max)
965 max = map->map[i];
966 }
967
968 return max;
969}
970
971static struct cpu_map *cpus_aggr_map;
972
973static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
974{
975 int cpu;
976
977 if (idx >= map->nr)
978 return -1;
979
980 cpu = map->map[idx];
981
982 if (cpus_aggr_map->map[cpu] == -1)
983 cpus_aggr_map->map[cpu] = get_id(map, idx);
984
985 return cpus_aggr_map->map[cpu];
986}
987
988static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
989{
990 return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
991}
992
993static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
994{
995 return perf_stat__get_aggr(perf_stat__get_core, map, idx);
996}
997
943static int perf_stat_init_aggr_mode(void) 998static int perf_stat_init_aggr_mode(void)
944{ 999{
1000 int nr;
1001
945 switch (stat_config.aggr_mode) { 1002 switch (stat_config.aggr_mode) {
946 case AGGR_SOCKET: 1003 case AGGR_SOCKET:
947 if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) { 1004 if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
948 perror("cannot build socket map"); 1005 perror("cannot build socket map");
949 return -1; 1006 return -1;
950 } 1007 }
951 aggr_get_id = cpu_map__get_socket; 1008 aggr_get_id = perf_stat__get_socket_cached;
952 break; 1009 break;
953 case AGGR_CORE: 1010 case AGGR_CORE:
954 if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) { 1011 if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
955 perror("cannot build core map"); 1012 perror("cannot build core map");
956 return -1; 1013 return -1;
957 } 1014 }
958 aggr_get_id = cpu_map__get_core; 1015 aggr_get_id = perf_stat__get_core_cached;
959 break; 1016 break;
960 case AGGR_NONE: 1017 case AGGR_NONE:
961 case AGGR_GLOBAL: 1018 case AGGR_GLOBAL:
962 case AGGR_THREAD: 1019 case AGGR_THREAD:
1020 case AGGR_UNSET:
963 default: 1021 default:
964 break; 1022 break;
965 } 1023 }
966 return 0; 1024
1025 /*
1026 * The evsel_list->cpus is the base we operate on,
1027 * taking the highest cpu number to be the size of
1028 * the aggregation translate cpumap.
1029 */
1030 nr = cpu_map__get_max(evsel_list->cpus);
1031 cpus_aggr_map = cpu_map__empty_new(nr + 1);
1032 return cpus_aggr_map ? 0 : -ENOMEM;
967} 1033}
968 1034
969/* 1035/*
@@ -1179,7 +1245,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1179 OPT_STRING(0, "post", &post_cmd, "command", 1245 OPT_STRING(0, "post", &post_cmd, "command",
1180 "command to run after to the measured command"), 1246 "command to run after to the measured command"),
1181 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1247 OPT_UINTEGER('I', "interval-print", &stat_config.interval,
1182 "print counts at regular interval in ms (>= 100)"), 1248 "print counts at regular interval in ms (>= 10)"),
1183 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, 1249 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
1184 "aggregate counts per processor socket", AGGR_SOCKET), 1250 "aggregate counts per processor socket", AGGR_SOCKET),
1185 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, 1251 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
@@ -1332,9 +1398,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1332 thread_map__read_comms(evsel_list->threads); 1398 thread_map__read_comms(evsel_list->threads);
1333 1399
1334 if (interval && interval < 100) { 1400 if (interval && interval < 100) {
1335 pr_err("print interval must be >= 100ms\n"); 1401 if (interval < 10) {
1336 parse_options_usage(stat_usage, options, "I", 1); 1402 pr_err("print interval must be >= 10ms\n");
1337 goto out; 1403 parse_options_usage(stat_usage, options, "I", 1);
1404 goto out;
1405 } else
1406 pr_warning("print interval < 100ms. "
1407 "The overhead percentage could be high in some cases. "
1408 "Please proceed with caution.\n");
1338 } 1409 }
1339 1410
1340 if (perf_evlist__alloc_stats(evsel_list, interval)) 1411 if (perf_evlist__alloc_stats(evsel_list, interval))
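
[Note] perf_stat__get_aggr() above memoizes the cpu-to-socket/core lookups in cpus_aggr_map, sized from the highest cpu number, so repeated interval printing does not redo the topology resolution for every counter read. A minimal sketch of that cache, with get_socket_id() as a hypothetical stand-in for cpu_map__get_socket():

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical slow lookup; in perf this is cpu_map__get_socket() or
     * cpu_map__get_core(), which go through the topology data. */
    static int get_socket_id(int cpu)
    {
        printf("  (slow lookup for cpu %d)\n", cpu);
        return cpu / 4;     /* assume 4 cpus per socket */
    }

    int main(void)
    {
        int nr_cpus = 8, i;
        int *cache = malloc(nr_cpus * sizeof(*cache));

        if (!cache)
            return 1;
        for (i = 0; i < nr_cpus; i++)
            cache[i] = -1;  /* -1 marks "not resolved yet" */

        /* later queries hit the cache instead of redoing the lookup */
        for (i = 0; i < 16; i++) {
            int cpu = i % nr_cpus;

            if (cache[cpu] == -1)
                cache[cpu] = get_socket_id(cpu);
            printf("cpu %d -> socket %d\n", cpu, cache[cpu]);
        }

        free(cache);
        return 0;
    }
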
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 8c465c83aabf..7e2e72e6d9d1 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -655,7 +655,7 @@ static int symbol_filter(struct map *map, struct symbol *sym)
655{ 655{
656 const char *name = sym->name; 656 const char *name = sym->name;
657 657
658 if (!map->dso->kernel) 658 if (!__map__is_kernel(map))
659 return 0; 659 return 0;
660 /* 660 /*
661 * ppc64 uses function descriptors and appends a '.' to the 661 * ppc64 uses function descriptors and appends a '.' to the
@@ -857,9 +857,12 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
857 * TODO: we don't process guest user from host side 857 * TODO: we don't process guest user from host side
858 * except simple counting. 858 * except simple counting.
859 */ 859 */
860 /* Fall thru */
861 default:
862 goto next_event; 860 goto next_event;
861 default:
862 if (event->header.type == PERF_RECORD_SAMPLE)
863 goto next_event;
864 machine = &session->machines.host;
865 break;
863 } 866 }
864 867
865 868
@@ -952,7 +955,7 @@ static int __cmd_top(struct perf_top *top)
952 machines__set_symbol_filter(&top->session->machines, symbol_filter); 955 machines__set_symbol_filter(&top->session->machines, symbol_filter);
953 956
954 if (!objdump_path) { 957 if (!objdump_path) {
955 ret = perf_session_env__lookup_objdump(&top->session->header.env); 958 ret = perf_env__lookup_objdump(&top->session->header.env);
956 if (ret) 959 if (ret)
957 goto out_delete; 960 goto out_delete;
958 } 961 }
@@ -961,8 +964,18 @@ static int __cmd_top(struct perf_top *top)
961 if (ret) 964 if (ret)
962 goto out_delete; 965 goto out_delete;
963 966
967 if (perf_session__register_idle_thread(top->session) == NULL)
968 goto out_delete;
969
964 machine__synthesize_threads(&top->session->machines.host, &opts->target, 970 machine__synthesize_threads(&top->session->machines.host, &opts->target,
965 top->evlist->threads, false, opts->proc_map_timeout); 971 top->evlist->threads, false, opts->proc_map_timeout);
972
973 if (sort__has_socket) {
974 ret = perf_env__read_cpu_topology_map(&perf_env);
975 if (ret < 0)
976 goto out_err_cpu_topo;
977 }
978
966 ret = perf_top__start_counters(top); 979 ret = perf_top__start_counters(top);
967 if (ret) 980 if (ret)
968 goto out_delete; 981 goto out_delete;
@@ -1020,6 +1033,14 @@ out_delete:
1020 top->session = NULL; 1033 top->session = NULL;
1021 1034
1022 return ret; 1035 return ret;
1036
1037out_err_cpu_topo: {
1038 char errbuf[BUFSIZ];
1039 const char *err = strerror_r(-ret, errbuf, sizeof(errbuf));
1040
1041 ui__error("Could not read the CPU topology map: %s\n", err);
1042 goto out_delete;
1043}
1023} 1044}
1024 1045
1025static int 1046static int
@@ -1032,8 +1053,22 @@ callchain_opt(const struct option *opt, const char *arg, int unset)
1032static int 1053static int
1033parse_callchain_opt(const struct option *opt, const char *arg, int unset) 1054parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1034{ 1055{
1035 symbol_conf.use_callchain = true; 1056 struct record_opts *record = (struct record_opts *)opt->value;
1036 return record_parse_callchain_opt(opt, arg, unset); 1057
1058 record->callgraph_set = true;
1059 callchain_param.enabled = !unset;
1060 callchain_param.record_mode = CALLCHAIN_FP;
1061
1062 /*
1063 * --no-call-graph
1064 */
1065 if (unset) {
1066 symbol_conf.use_callchain = false;
1067 callchain_param.record_mode = CALLCHAIN_NONE;
1068 return 0;
1069 }
1070
1071 return parse_callchain_top_opt(arg);
1037} 1072}
1038 1073
1039static int perf_top_config(const char *var, const char *value, void *cb) 1074static int perf_top_config(const char *var, const char *value, void *cb)
@@ -1058,6 +1093,9 @@ parse_percent_limit(const struct option *opt, const char *arg,
1058 return 0; 1093 return 0;
1059} 1094}
1060 1095
1096const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
1097 "\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
1098
1061int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) 1099int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1062{ 1100{
1063 char errbuf[BUFSIZ]; 1101 char errbuf[BUFSIZ];
@@ -1133,11 +1171,11 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1133 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 1171 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1134 "Show a column with the number of samples"), 1172 "Show a column with the number of samples"),
1135 OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts, 1173 OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts,
1136 NULL, "enables call-graph recording", 1174 NULL, "enables call-graph recording and display",
1137 &callchain_opt), 1175 &callchain_opt),
1138 OPT_CALLBACK(0, "call-graph", &top.record_opts, 1176 OPT_CALLBACK(0, "call-graph", &top.record_opts,
1139 "mode[,dump_size]", record_callchain_help, 1177 "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
1140 &parse_callchain_opt), 1178 top_callchain_help, &parse_callchain_opt),
1141 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, 1179 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1142 "Accumulate callchains of children and show total overhead as well"), 1180 "Accumulate callchains of children and show total overhead as well"),
1143 OPT_INTEGER(0, "max-stack", &top.max_stack, 1181 OPT_INTEGER(0, "max-stack", &top.max_stack,
@@ -1267,6 +1305,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
1267 perf_hpp__cancel_cumulate(); 1305 perf_hpp__cancel_cumulate();
1268 } 1306 }
1269 1307
1308 if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1309 callchain_param.order = ORDER_CALLER;
1310
1270 symbol_conf.priv_size = sizeof(struct annotation); 1311 symbol_conf.priv_size = sizeof(struct annotation);
1271 1312
1272 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); 1313 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 4e3abba03062..c783d8fd3a80 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <traceevent/event-parse.h> 19#include <traceevent/event-parse.h>
20#include <api/fs/tracing_path.h>
20#include "builtin.h" 21#include "builtin.h"
21#include "util/color.h" 22#include "util/color.h"
22#include "util/debug.h" 23#include "util/debug.h"
@@ -37,6 +38,7 @@
37#include <stdlib.h> 38#include <stdlib.h>
38#include <sys/mman.h> 39#include <sys/mman.h>
39#include <linux/futex.h> 40#include <linux/futex.h>
41#include <linux/err.h>
40 42
41/* For older distros: */ 43/* For older distros: */
42#ifndef MAP_STACK 44#ifndef MAP_STACK
@@ -244,13 +246,14 @@ static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void
244 struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction); 246 struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
245 247
246 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */ 248 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
247 if (evsel == NULL) 249 if (IS_ERR(evsel))
248 evsel = perf_evsel__newtp("syscalls", direction); 250 evsel = perf_evsel__newtp("syscalls", direction);
249 251
250 if (evsel) { 252 if (IS_ERR(evsel))
251 if (perf_evsel__init_syscall_tp(evsel, handler)) 253 return NULL;
252 goto out_delete; 254
253 } 255 if (perf_evsel__init_syscall_tp(evsel, handler))
256 goto out_delete;
254 257
255 return evsel; 258 return evsel;
256 259
@@ -582,6 +585,12 @@ static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct sysc
582 585
583#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op 586#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
584 587
588static const char *bpf_cmd[] = {
589 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
590 "MAP_GET_NEXT_KEY", "PROG_LOAD",
591};
592static DEFINE_STRARRAY(bpf_cmd);
593
585static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", }; 594static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
586static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1); 595static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
587 596
@@ -1008,6 +1017,7 @@ static struct syscall_fmt {
1008 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ 1017 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
1009 [1] = SCA_ACCMODE, /* mode */ }, }, 1018 [1] = SCA_ACCMODE, /* mode */ }, },
1010 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", }, 1019 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
1020 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
1011 { .name = "brk", .hexret = true, 1021 { .name = "brk", .hexret = true,
1012 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, 1022 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
1013 { .name = "chdir", .errmsg = true, 1023 { .name = "chdir", .errmsg = true,
@@ -1704,12 +1714,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
1704 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); 1714 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1705 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 1715 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1706 1716
1707 if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) { 1717 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1708 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); 1718 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1709 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 1719 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1710 } 1720 }
1711 1721
1712 if (sc->tp_format == NULL) 1722 if (IS_ERR(sc->tp_format))
1713 return -1; 1723 return -1;
1714 1724
1715 sc->args = sc->tp_format->format.fields; 1725 sc->args = sc->tp_format->format.fields;
@@ -2389,7 +2399,8 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2389static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) 2399static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2390{ 2400{
2391 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); 2401 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
2392 if (evsel == NULL) 2402
2403 if (IS_ERR(evsel))
2393 return false; 2404 return false;
2394 2405
2395 if (perf_evsel__field(evsel, "pathname") == NULL) { 2406 if (perf_evsel__field(evsel, "pathname") == NULL) {
@@ -2686,11 +2697,11 @@ out_delete_evlist:
2686 char errbuf[BUFSIZ]; 2697 char errbuf[BUFSIZ];
2687 2698
2688out_error_sched_stat_runtime: 2699out_error_sched_stat_runtime:
2689 debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 2700 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
2690 goto out_error; 2701 goto out_error;
2691 2702
2692out_error_raw_syscalls: 2703out_error_raw_syscalls:
2693 debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 2704 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
2694 goto out_error; 2705 goto out_error;
2695 2706
2696out_error_mmap: 2707out_error_mmap:
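
[Note] Several tracepoint setup paths above switch from NULL checks to IS_ERR(), because perf_evsel__newtp() now returns an error pointer carrying the errno, so different failure causes (missing tracepoint, permissions, ...) can be told apart. A minimal sketch of that convention, with simplified versions of the <linux/err.h> helpers and a hypothetical new_tracepoint() constructor:

    #include <stdio.h>
    #include <errno.h>

    /* Simplified versions of the <linux/err.h> helpers: a small negative errno
     * is encoded in the top of the pointer range instead of returning NULL. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
        return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
        return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical constructor in this style: a valid object or ERR_PTR(-errno). */
    static void *new_tracepoint(const char *name)
    {
        if (!name)
            return ERR_PTR(-EINVAL);
        return ERR_PTR(-ENOENT);    /* pretend the tracepoint does not exist */
    }

    int main(void)
    {
        void *evsel = new_tracepoint("raw_syscalls:sys_enter");

        if (IS_ERR(evsel))
            fprintf(stderr, "tracepoint setup failed: %ld\n", PTR_ERR(evsel));
        return 0;
    }
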
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 38a08539f4bf..de89ec574361 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -106,9 +106,14 @@ ifdef LIBBABELTRACE
106 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf 106 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
107endif 107endif
108 108
109FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
109# include ARCH specific config 110# include ARCH specific config
110-include $(src-perf)/arch/$(ARCH)/Makefile 111-include $(src-perf)/arch/$(ARCH)/Makefile
111 112
113ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
114 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
115endif
116
112include $(src-perf)/config/utilities.mak 117include $(src-perf)/config/utilities.mak
113 118
114ifeq ($(call get-executable,$(FLEX)),) 119ifeq ($(call get-executable,$(FLEX)),)
@@ -233,6 +238,7 @@ ifdef NO_LIBELF
233 NO_DEMANGLE := 1 238 NO_DEMANGLE := 1
234 NO_LIBUNWIND := 1 239 NO_LIBUNWIND := 1
235 NO_LIBDW_DWARF_UNWIND := 1 240 NO_LIBDW_DWARF_UNWIND := 1
241 NO_LIBBPF := 1
236else 242else
237 ifeq ($(feature-libelf), 0) 243 ifeq ($(feature-libelf), 0)
238 ifeq ($(feature-glibc), 1) 244 ifeq ($(feature-glibc), 1)
@@ -242,13 +248,14 @@ else
242 LIBC_SUPPORT := 1 248 LIBC_SUPPORT := 1
243 endif 249 endif
244 ifeq ($(LIBC_SUPPORT),1) 250 ifeq ($(LIBC_SUPPORT),1)
245 msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev); 251 msg := $(warning No libelf found, disables 'probe' tool and BPF support in 'perf record', please install elfutils-libelf-devel/libelf-dev);
246 252
247 NO_LIBELF := 1 253 NO_LIBELF := 1
248 NO_DWARF := 1 254 NO_DWARF := 1
249 NO_DEMANGLE := 1 255 NO_DEMANGLE := 1
250 NO_LIBUNWIND := 1 256 NO_LIBUNWIND := 1
251 NO_LIBDW_DWARF_UNWIND := 1 257 NO_LIBDW_DWARF_UNWIND := 1
258 NO_LIBBPF := 1
252 else 259 else
253 ifneq ($(filter s% -static%,$(LDFLAGS),),) 260 ifneq ($(filter s% -static%,$(LDFLAGS),),)
254 msg := $(error No static glibc found, please install glibc-static); 261 msg := $(error No static glibc found, please install glibc-static);
@@ -305,6 +312,13 @@ ifndef NO_LIBELF
305 $(call detected,CONFIG_DWARF) 312 $(call detected,CONFIG_DWARF)
306 endif # PERF_HAVE_DWARF_REGS 313 endif # PERF_HAVE_DWARF_REGS
307 endif # NO_DWARF 314 endif # NO_DWARF
315
316 ifndef NO_LIBBPF
317 ifeq ($(feature-bpf), 1)
318 CFLAGS += -DHAVE_LIBBPF_SUPPORT
319 $(call detected,CONFIG_LIBBPF)
320 endif
321 endif # NO_LIBBPF
308endif # NO_LIBELF 322endif # NO_LIBELF
309 323
310ifeq ($(ARCH),powerpc) 324ifeq ($(ARCH),powerpc)
@@ -320,6 +334,13 @@ ifndef NO_LIBUNWIND
320 endif 334 endif
321endif 335endif
322 336
337ifndef NO_LIBBPF
338 ifneq ($(feature-bpf), 1)
339 msg := $(warning BPF API too old. Please install recent kernel headers. BPF support in 'perf record' is disabled.)
340 NO_LIBBPF := 1
341 endif
342endif
343
323dwarf-post-unwind := 1 344dwarf-post-unwind := 1
324dwarf-post-unwind-text := BUG 345dwarf-post-unwind-text := BUG
325 346
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 07dbff5c0e60..3d4c7c09adea 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -8,14 +8,16 @@
8 */ 8 */
9#include "builtin.h" 9#include "builtin.h"
10 10
11#include "util/env.h"
11#include "util/exec_cmd.h" 12#include "util/exec_cmd.h"
12#include "util/cache.h" 13#include "util/cache.h"
13#include "util/quote.h" 14#include "util/quote.h"
14#include "util/run-command.h" 15#include "util/run-command.h"
15#include "util/parse-events.h" 16#include "util/parse-events.h"
16#include "util/parse-options.h" 17#include "util/parse-options.h"
18#include "util/bpf-loader.h"
17#include "util/debug.h" 19#include "util/debug.h"
18#include <api/fs/debugfs.h> 20#include <api/fs/tracing_path.h>
19#include <pthread.h> 21#include <pthread.h>
20 22
21const char perf_usage_string[] = 23const char perf_usage_string[] =
@@ -161,6 +163,20 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
161 break; 163 break;
162 164
163 /* 165 /*
166 * Shortcut for '-h' and '-v' options to invoke help
167 * and version command.
168 */
169 if (!strcmp(cmd, "-h")) {
170 (*argv)[0] = "--help";
171 break;
172 }
173
174 if (!strcmp(cmd, "-v")) {
175 (*argv)[0] = "--version";
176 break;
177 }
178
179 /*
164 * Check remaining flags. 180 * Check remaining flags.
165 */ 181 */
166 if (!prefixcmp(cmd, CMD_EXEC_PATH)) { 182 if (!prefixcmp(cmd, CMD_EXEC_PATH)) {
@@ -214,7 +230,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
214 fprintf(stderr, "No directory given for --debugfs-dir.\n"); 230 fprintf(stderr, "No directory given for --debugfs-dir.\n");
215 usage(perf_usage_string); 231 usage(perf_usage_string);
216 } 232 }
217 perf_debugfs_set_path((*argv)[1]); 233 tracing_path_set((*argv)[1]);
218 if (envchanged) 234 if (envchanged)
219 *envchanged = 1; 235 *envchanged = 1;
220 (*argv)++; 236 (*argv)++;
@@ -230,7 +246,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
230 (*argv)++; 246 (*argv)++;
231 (*argc)--; 247 (*argc)--;
232 } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { 248 } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
233 perf_debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR)); 249 tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR));
234 fprintf(stderr, "dir: %s\n", tracing_path); 250 fprintf(stderr, "dir: %s\n", tracing_path);
235 if (envchanged) 251 if (envchanged)
236 *envchanged = 1; 252 *envchanged = 1;
@@ -369,6 +385,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
369 385
370 status = p->fn(argc, argv, prefix); 386 status = p->fn(argc, argv, prefix);
371 exit_browser(status); 387 exit_browser(status);
388 perf_env__exit(&perf_env);
389 bpf__clear();
372 390
373 if (status) 391 if (status)
374 return status & 0xff; 392 return status & 0xff;
@@ -517,8 +535,10 @@ int main(int argc, const char **argv)
517 cmd = perf_extract_argv0_path(argv[0]); 535 cmd = perf_extract_argv0_path(argv[0]);
518 if (!cmd) 536 if (!cmd)
519 cmd = "perf-help"; 537 cmd = "perf-help";
520 /* get debugfs mount point from /proc/mounts */ 538
521 perf_debugfs_mount(NULL); 539 /* get debugfs/tracefs mount point from /proc/mounts */
540 tracing_path_mount();
541
522 /* 542 /*
523 * "perf-xxxx" is the same as "perf xxxx", but we obviously: 543 * "perf-xxxx" is the same as "perf xxxx", but we obviously:
524 * 544 *
diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py
index b9d508336ae6..c235c22b107a 100755
--- a/tools/perf/python/twatch.py
+++ b/tools/perf/python/twatch.py
@@ -15,14 +15,14 @@
15 15
16import perf 16import perf
17 17
18def main(): 18def main(context_switch = 0, thread = -1):
19 cpus = perf.cpu_map() 19 cpus = perf.cpu_map()
20 threads = perf.thread_map() 20 threads = perf.thread_map(thread)
21 evsel = perf.evsel(type = perf.TYPE_SOFTWARE, 21 evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
22 config = perf.COUNT_SW_DUMMY, 22 config = perf.COUNT_SW_DUMMY,
23 task = 1, comm = 1, mmap = 0, freq = 0, 23 task = 1, comm = 1, mmap = 0, freq = 0,
24 wakeup_events = 1, watermark = 1, 24 wakeup_events = 1, watermark = 1,
25 sample_id_all = 1, 25 sample_id_all = 1, context_switch = context_switch,
26 sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU) 26 sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
27 27
28 """What we want are just the PERF_RECORD_ lifetime events for threads, 28 """What we want are just the PERF_RECORD_ lifetime events for threads,
@@ -48,4 +48,21 @@ def main():
48 print event 48 print event
49 49
50if __name__ == '__main__': 50if __name__ == '__main__':
51 """
52 To test the PERF_RECORD_SWITCH record, pick a pid and substitute it
53 in the commented-out main() call below.
54
55 Example output:
56
57cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 }
58cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 }
59cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 }
60cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 }
61
62 It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
63 to figure out if this is a context switch in or out of the monitored threads.
64
65 If bored, please add command line option parsing support for these options :-)
66 """
67 # main(context_switch = 1, thread = 31463)
51 main() 68 main()
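The docstring above leaves command-line option parsing as an exercise. A minimal sketch using argparse (the option names and the wrapper itself are illustrative, not part of this patch; it assumes twatch.py is importable from the current directory):

    import argparse
    import twatch

    if __name__ == '__main__':
        # Hypothetical wrapper: map --context-switch/--thread onto the new
        # keyword arguments that main() grew in this patch.
        parser = argparse.ArgumentParser(description='watch perf thread lifetime events')
        parser.add_argument('--context-switch', type=int, default=0,
                            help='request PERF_RECORD_SWITCH events (0 or 1)')
        parser.add_argument('--thread', type=int, default=-1,
                            help='pid/tid to monitor, -1 means all threads')
        args = parser.parse_args()
        twatch.main(context_switch = args.context_switch, thread = args.thread)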
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 84a32037a80f..1b02cdc0cab6 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -61,6 +61,142 @@ import datetime
61# 61#
62# An example of using the database is provided by the script 62# An example of using the database is provided by the script
63# call-graph-from-postgresql.py. Refer to that script for details. 63# call-graph-from-postgresql.py. Refer to that script for details.
64#
65# Tables:
66#
67# The tables largely correspond to perf tools' data structures and are mostly self-explanatory.
68#
69# samples
70#
71# 'samples' is the main table. It represents what instruction was executing at a point in time
72# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'.
73#
74# calls
75#
76# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
77# 'calls' is only created when the 'calls' option to this script is specified.
78#
79# call_paths
80#
81# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'.
82# 'call_paths' is only created when the 'calls' option to this script is specified.
83#
84# branch_types
85#
86# 'branch_types' provides descriptions for each type of branch.
87#
88# comm_threads
89#
90# 'comm_threads' shows how 'comms' relates to 'threads'.
91#
92# comms
93#
94# 'comms' contains a record for each 'comm' - the name given to the executable that is running.
95#
96# dsos
97#
98# 'dsos' contains a record for each executable file or library.
99#
100# machines
101#
102# 'machines' can be used to distinguish virtual machines if virtualization is supported.
103#
104# selected_events
105#
106# 'selected_events' contains a record for each kind of event that has been sampled.
107#
108# symbols
109#
110# 'symbols' contains a record for each symbol. Only symbols that have samples are present.
111#
112# threads
113#
114# 'threads' contains a record for each thread.
115#
116# Views:
117#
118# Most of the tables have views for more friendly display. The views are:
119#
120# calls_view
121# call_paths_view
122# comm_threads_view
123# dsos_view
124# machines_view
125# samples_view
126# symbols_view
127# threads_view
128#
129# More examples of browsing the database with psql:
130# Note that some of the examples are not the most optimal SQL queries.
131# Note that call information is only available if the script's 'calls' option has been used.
132#
133# Top 10 function calls (not aggregated by symbol):
134#
135# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
136#
137# Top 10 function calls (aggregated by symbol):
138#
139# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
140# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
141# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
142#
143# Note that the branch count gives a rough estimation of cpu usage, so functions
144# that took a long time but have a relatively low branch count must have spent time
145# waiting.
146#
147# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
148#
149# SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
150#
151# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
152#
153# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
154#
157# Show function calls made by a function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
156#
157# SELECT * FROM calls_view WHERE parent_call_path_id = 254;
158#
159# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
160#
161# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
162#
163# Show transactions:
164#
165# SELECT * FROM samples_view WHERE event = 'transactions';
166#
167# Note that transaction start has 'in_tx' true whereas transaction end has 'in_tx' false.
168# Transaction aborts have branch_type_name 'transaction abort'.
169#
170# Show transaction aborts:
171#
172# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
173#
174# To print a call stack requires walking the call_paths table. For example this python script:
175# #!/usr/bin/python2
176#
177# import sys
178# from PySide.QtSql import *
179#
180# if __name__ == '__main__':
181# if (len(sys.argv) < 3):
182# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
183# raise Exception("Too few arguments")
184# dbname = sys.argv[1]
185# call_path_id = sys.argv[2]
186# db = QSqlDatabase.addDatabase('QPSQL')
187# db.setDatabaseName(dbname)
188# if not db.open():
189# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
190# query = QSqlQuery(db)
191# print " id ip symbol_id symbol dso_id dso_short_name"
192# while call_path_id != 0 and call_path_id != 1:
193# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
194# if not ret:
195# raise Exception("Query failed: " + query.lastError().text())
196# if not query.next():
197# raise Exception("Query failed")
198# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
199# call_path_id = query.value(6)
64 200
65from PySide.QtSql import * 201from PySide.QtSql import *
66 202
@@ -244,6 +380,91 @@ if perf_db_export_calls:
244 'parent_call_path_id bigint,' 380 'parent_call_path_id bigint,'
245 'flags integer)') 381 'flags integer)')
246 382
383do_query(query, 'CREATE VIEW machines_view AS '
384 'SELECT '
385 'id,'
386 'pid,'
387 'root_dir,'
388 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
389 ' FROM machines')
390
391do_query(query, 'CREATE VIEW dsos_view AS '
392 'SELECT '
393 'id,'
394 'machine_id,'
395 '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
396 'short_name,'
397 'long_name,'
398 'build_id'
399 ' FROM dsos')
400
401do_query(query, 'CREATE VIEW symbols_view AS '
402 'SELECT '
403 'id,'
404 'name,'
405 '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
406 'dso_id,'
407 'sym_start,'
408 'sym_end,'
409 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
410 ' FROM symbols')
411
412do_query(query, 'CREATE VIEW threads_view AS '
413 'SELECT '
414 'id,'
415 'machine_id,'
416 '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
417 'process_id,'
418 'pid,'
419 'tid'
420 ' FROM threads')
421
422do_query(query, 'CREATE VIEW comm_threads_view AS '
423 'SELECT '
424 'comm_id,'
425 '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
426 'thread_id,'
427 '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
428 '(SELECT tid FROM threads WHERE id = thread_id) AS tid'
429 ' FROM comm_threads')
430
431if perf_db_export_calls:
432 do_query(query, 'CREATE VIEW call_paths_view AS '
433 'SELECT '
434 'c.id,'
435 'to_hex(c.ip) AS ip,'
436 'c.symbol_id,'
437 '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
438 '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
439 '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
440 'c.parent_id,'
441 'to_hex(p.ip) AS parent_ip,'
442 'p.symbol_id AS parent_symbol_id,'
443 '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
444 '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
445 '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
446 ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
447 do_query(query, 'CREATE VIEW calls_view AS '
448 'SELECT '
449 'calls.id,'
450 'thread_id,'
451 '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
452 '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
453 '(SELECT comm FROM comms WHERE id = comm_id) AS command,'
454 'call_path_id,'
455 'to_hex(ip) AS ip,'
456 'symbol_id,'
457 '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
458 'call_time,'
459 'return_time,'
460 'return_time - call_time AS elapsed_time,'
461 'branch_count,'
462 'call_id,'
463 'return_id,'
464 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
465 'parent_call_path_id'
466 ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
467
247do_query(query, 'CREATE VIEW samples_view AS ' 468do_query(query, 'CREATE VIEW samples_view AS '
248 'SELECT ' 469 'SELECT '
249 'id,' 470 'id,'
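The new views can be browsed from Python in the same way as the printcallstack.py example above. A small sketch (the script name and usage are illustrative; it assumes the export was run with the 'calls' option so that calls_view exists) listing the ten longest calls:

    #!/usr/bin/python2
    # Illustrative only: query the calls_view created above via PySide.
    import sys
    from PySide.QtSql import *

    if __name__ == '__main__':
        if len(sys.argv) < 2:
            print >> sys.stderr, "Usage is: toptencalls.py <database name>"
            raise Exception("Too few arguments")
        dbname = sys.argv[1]
        db = QSqlDatabase.addDatabase('QPSQL')
        db.setDatabaseName(dbname)
        if not db.open():
            raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
        query = QSqlQuery(db)
        if not query.exec_('SELECT command, symbol, elapsed_time, branch_count '
                           'FROM calls_view ORDER BY elapsed_time DESC LIMIT 10'):
            raise Exception("Query failed: " + query.lastError().text())
        while query.next():
            print "{0:<16} {1:<30} {2:>12} {3:>12}".format(query.value(0), query.value(1), query.value(2), query.value(3))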
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index c1518bdd0f1b..50de2253cff6 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -8,7 +8,6 @@ perf-y += openat-syscall-all-cpus.o
8perf-y += openat-syscall-tp-fields.o 8perf-y += openat-syscall-tp-fields.o
9perf-y += mmap-basic.o 9perf-y += mmap-basic.o
10perf-y += perf-record.o 10perf-y += perf-record.o
11perf-y += rdpmc.o
12perf-y += evsel-roundtrip-name.o 11perf-y += evsel-roundtrip-name.o
13perf-y += evsel-tp-sched.o 12perf-y += evsel-tp-sched.o
14perf-y += fdarray.o 13perf-y += fdarray.o
@@ -33,8 +32,7 @@ perf-y += parse-no-sample-id-all.o
33perf-y += kmod-path.o 32perf-y += kmod-path.o
34perf-y += thread-map.o 33perf-y += thread-map.o
35perf-y += llvm.o 34perf-y += llvm.o
36 35perf-y += topology.o
37perf-$(CONFIG_X86) += perf-time-to-tsc.o
38 36
39ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64)) 37ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
40perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o 38perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
new file mode 100644
index 000000000000..410a70b93b93
--- /dev/null
+++ b/tools/perf/tests/bpf-script-example.c
@@ -0,0 +1,44 @@
1#ifndef LINUX_VERSION_CODE
2# error Need LINUX_VERSION_CODE
3# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200"' into the llvm section of ~/.perfconfig
4#endif
5#define BPF_ANY 0
6#define BPF_MAP_TYPE_ARRAY 2
7#define BPF_FUNC_map_lookup_elem 1
8#define BPF_FUNC_map_update_elem 2
9
10static void *(*bpf_map_lookup_elem)(void *map, void *key) =
11 (void *) BPF_FUNC_map_lookup_elem;
12static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
13 (void *) BPF_FUNC_map_update_elem;
14
15struct bpf_map_def {
16 unsigned int type;
17 unsigned int key_size;
18 unsigned int value_size;
19 unsigned int max_entries;
20};
21
22#define SEC(NAME) __attribute__((section(NAME), used))
23struct bpf_map_def SEC("maps") flip_table = {
24 .type = BPF_MAP_TYPE_ARRAY,
25 .key_size = sizeof(int),
26 .value_size = sizeof(int),
27 .max_entries = 1,
28};
29
30SEC("func=sys_epoll_pwait")
31int bpf_func__sys_epoll_pwait(void *ctx)
32{
33 int ind = 0;
34 int *flag = bpf_map_lookup_elem(&flip_table, &ind);
35 int new_flag;
36 if (!flag)
37 return 0;
38 /* flip flag and store back */
39 new_flag = !*flag;
40 bpf_map_update_elem(&flip_table, &ind, &new_flag, BPF_ANY);
41 return new_flag;
42}
43char _license[] SEC("license") = "GPL";
44int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 136cd934be66..66f72d3d6677 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -14,10 +14,13 @@
14#include "parse-options.h" 14#include "parse-options.h"
15#include "symbol.h" 15#include "symbol.h"
16 16
17static struct test { 17struct test __weak arch_tests[] = {
18 const char *desc; 18 {
19 int (*func)(void); 19 .func = NULL,
20} tests[] = { 20 },
21};
22
23static struct test generic_tests[] = {
21 { 24 {
22 .desc = "vmlinux symtab matches kallsyms", 25 .desc = "vmlinux symtab matches kallsyms",
23 .func = test__vmlinux_matches_kallsyms, 26 .func = test__vmlinux_matches_kallsyms,
@@ -38,12 +41,6 @@ static struct test {
38 .desc = "parse events tests", 41 .desc = "parse events tests",
39 .func = test__parse_events, 42 .func = test__parse_events,
40 }, 43 },
41#if defined(__x86_64__) || defined(__i386__)
42 {
43 .desc = "x86 rdpmc test",
44 .func = test__rdpmc,
45 },
46#endif
47 { 44 {
48 .desc = "Validate PERF_RECORD_* events & perf_sample fields", 45 .desc = "Validate PERF_RECORD_* events & perf_sample fields",
49 .func = test__PERF_RECORD, 46 .func = test__PERF_RECORD,
@@ -104,12 +101,6 @@ static struct test {
104 .desc = "Test software clock events have valid period values", 101 .desc = "Test software clock events have valid period values",
105 .func = test__sw_clock_freq, 102 .func = test__sw_clock_freq,
106 }, 103 },
107#if defined(__x86_64__) || defined(__i386__)
108 {
109 .desc = "Test converting perf time to TSC",
110 .func = test__perf_time_to_tsc,
111 },
112#endif
113 { 104 {
114 .desc = "Test object code reading", 105 .desc = "Test object code reading",
115 .func = test__code_reading, 106 .func = test__code_reading,
@@ -126,14 +117,6 @@ static struct test {
126 .desc = "Test parsing with no sample_id_all bit set", 117 .desc = "Test parsing with no sample_id_all bit set",
127 .func = test__parse_no_sample_id_all, 118 .func = test__parse_no_sample_id_all,
128 }, 119 },
129#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
130#ifdef HAVE_DWARF_UNWIND_SUPPORT
131 {
132 .desc = "Test dwarf unwind",
133 .func = test__dwarf_unwind,
134 },
135#endif
136#endif
137 { 120 {
138 .desc = "Test filtering hist entries", 121 .desc = "Test filtering hist entries",
139 .func = test__hists_filter, 122 .func = test__hists_filter,
@@ -179,11 +162,20 @@ static struct test {
179 .func = test__llvm, 162 .func = test__llvm,
180 }, 163 },
181 { 164 {
165 .desc = "Test topology in session",
166 .func = test_session_topology,
167 },
168 {
182 .func = NULL, 169 .func = NULL,
183 }, 170 },
184}; 171};
185 172
186static bool perf_test__matches(int curr, int argc, const char *argv[]) 173static struct test *tests[] = {
174 generic_tests,
175 arch_tests,
176};
177
178static bool perf_test__matches(struct test *test, int curr, int argc, const char *argv[])
187{ 179{
188 int i; 180 int i;
189 181
@@ -200,7 +192,7 @@ static bool perf_test__matches(int curr, int argc, const char *argv[])
200 continue; 192 continue;
201 } 193 }
202 194
203 if (strstr(tests[curr].desc, argv[i])) 195 if (strstr(test->desc, argv[i]))
204 return true; 196 return true;
205 } 197 }
206 198
@@ -237,27 +229,31 @@ static int run_test(struct test *test)
237 return err; 229 return err;
238} 230}
239 231
232#define for_each_test(j, t) \
233 for (j = 0; j < ARRAY_SIZE(tests); j++) \
234 for (t = &tests[j][0]; t->func; t++)
235
240static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) 236static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
241{ 237{
238 struct test *t;
239 unsigned int j;
242 int i = 0; 240 int i = 0;
243 int width = 0; 241 int width = 0;
244 242
245 while (tests[i].func) { 243 for_each_test(j, t) {
246 int len = strlen(tests[i].desc); 244 int len = strlen(t->desc);
247 245
248 if (width < len) 246 if (width < len)
249 width = len; 247 width = len;
250 ++i;
251 } 248 }
252 249
253 i = 0; 250 for_each_test(j, t) {
254 while (tests[i].func) {
255 int curr = i++, err; 251 int curr = i++, err;
256 252
257 if (!perf_test__matches(curr, argc, argv)) 253 if (!perf_test__matches(t, curr, argc, argv))
258 continue; 254 continue;
259 255
260 pr_info("%2d: %-*s:", i, width, tests[curr].desc); 256 pr_info("%2d: %-*s:", i, width, t->desc);
261 257
262 if (intlist__find(skiplist, i)) { 258 if (intlist__find(skiplist, i)) {
263 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); 259 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
@@ -265,8 +261,8 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
265 } 261 }
266 262
267 pr_debug("\n--- start ---\n"); 263 pr_debug("\n--- start ---\n");
268 err = run_test(&tests[curr]); 264 err = run_test(t);
269 pr_debug("---- end ----\n%s:", tests[curr].desc); 265 pr_debug("---- end ----\n%s:", t->desc);
270 266
271 switch (err) { 267 switch (err) {
272 case TEST_OK: 268 case TEST_OK:
@@ -287,15 +283,15 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
287 283
288static int perf_test__list(int argc, const char **argv) 284static int perf_test__list(int argc, const char **argv)
289{ 285{
286 unsigned int j;
287 struct test *t;
290 int i = 0; 288 int i = 0;
291 289
292 while (tests[i].func) { 290 for_each_test(j, t) {
293 int curr = i++; 291 if (argc > 1 && !strstr(t->desc, argv[1]))
294
295 if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
296 continue; 292 continue;
297 293
298 pr_info("%2d: %s\n", i, tests[curr].desc); 294 pr_info("%2d: %s\n", ++i, t->desc);
299 } 295 }
300 296
301 return 0; 297 return 0;
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 39c784a100a9..49b1959dda41 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -33,20 +33,20 @@ static unsigned int hex(char c)
33 return c - 'A' + 10; 33 return c - 'A' + 10;
34} 34}
35 35
36static void read_objdump_line(const char *line, size_t line_len, void **buf, 36static size_t read_objdump_line(const char *line, size_t line_len, void *buf,
37 size_t *len) 37 size_t len)
38{ 38{
39 const char *p; 39 const char *p;
40 size_t i; 40 size_t i, j = 0;
41 41
42 /* Skip to a colon */ 42 /* Skip to a colon */
43 p = strchr(line, ':'); 43 p = strchr(line, ':');
44 if (!p) 44 if (!p)
45 return; 45 return 0;
46 i = p + 1 - line; 46 i = p + 1 - line;
47 47
48 /* Read bytes */ 48 /* Read bytes */
49 while (*len) { 49 while (j < len) {
50 char c1, c2; 50 char c1, c2;
51 51
52 /* Skip spaces */ 52 /* Skip spaces */
@@ -65,20 +65,26 @@ static void read_objdump_line(const char *line, size_t line_len, void **buf,
65 if (i < line_len && line[i] && !isspace(line[i])) 65 if (i < line_len && line[i] && !isspace(line[i]))
66 break; 66 break;
67 /* Store byte */ 67 /* Store byte */
68 *(unsigned char *)*buf = (hex(c1) << 4) | hex(c2); 68 *(unsigned char *)buf = (hex(c1) << 4) | hex(c2);
69 *buf += 1; 69 buf += 1;
70 *len -= 1; 70 j++;
71 } 71 }
72 /* return number of successfully read bytes */
73 return j;
72} 74}
73 75
74static int read_objdump_output(FILE *f, void **buf, size_t *len) 76static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
75{ 77{
76 char *line = NULL; 78 char *line = NULL;
77 size_t line_len; 79 size_t line_len, off_last = 0;
78 ssize_t ret; 80 ssize_t ret;
79 int err = 0; 81 int err = 0;
82 u64 addr, last_addr = start_addr;
83
84 while (off_last < *len) {
85 size_t off, read_bytes, written_bytes;
86 unsigned char tmp[BUFSZ];
80 87
81 while (1) {
82 ret = getline(&line, &line_len, f); 88 ret = getline(&line, &line_len, f);
83 if (feof(f)) 89 if (feof(f))
84 break; 90 break;
@@ -87,9 +93,33 @@ static int read_objdump_output(FILE *f, void **buf, size_t *len)
87 err = -1; 93 err = -1;
88 break; 94 break;
89 } 95 }
90 read_objdump_line(line, ret, buf, len); 96
97 /* read objdump data into temporary buffer */
98 read_bytes = read_objdump_line(line, ret, tmp, sizeof(tmp));
99 if (!read_bytes)
100 continue;
101
102 if (sscanf(line, "%"PRIx64, &addr) != 1)
103 continue;
104 if (addr < last_addr) {
105 pr_debug("addr going backwards, read beyond section?\n");
106 break;
107 }
108 last_addr = addr;
109
110 /* copy it from temporary buffer to 'buf' according
111 * to address on current objdump line */
112 off = addr - start_addr;
113 if (off >= *len)
114 break;
115 written_bytes = MIN(read_bytes, *len - off);
116 memcpy(buf + off, tmp, written_bytes);
117 off_last = off + written_bytes;
91 } 118 }
92 119
120 /* len returns number of bytes that could not be read */
121 *len -= off_last;
122
93 free(line); 123 free(line);
94 124
95 return err; 125 return err;
@@ -103,7 +133,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
103 FILE *f; 133 FILE *f;
104 int ret; 134 int ret;
105 135
106 fmt = "%s -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s"; 136 fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
107 ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len, 137 ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
108 filename); 138 filename);
109 if (ret <= 0 || (size_t)ret >= sizeof(cmd)) 139 if (ret <= 0 || (size_t)ret >= sizeof(cmd))
@@ -120,7 +150,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
120 return -1; 150 return -1;
121 } 151 }
122 152
123 ret = read_objdump_output(f, &buf, &len); 153 ret = read_objdump_output(f, buf, &len, addr);
124 if (len) { 154 if (len) {
125 pr_debug("objdump read too few bytes\n"); 155 pr_debug("objdump read too few bytes\n");
126 if (!ret) 156 if (!ret)
@@ -132,6 +162,18 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
132 return ret; 162 return ret;
133} 163}
134 164
165static void dump_buf(unsigned char *buf, size_t len)
166{
167 size_t i;
168
169 for (i = 0; i < len; i++) {
170 pr_debug("0x%02x ", buf[i]);
171 if (i % 16 == 15)
172 pr_debug("\n");
173 }
174 pr_debug("\n");
175}
176
135static int read_object_code(u64 addr, size_t len, u8 cpumode, 177static int read_object_code(u64 addr, size_t len, u8 cpumode,
136 struct thread *thread, struct state *state) 178 struct thread *thread, struct state *state)
137{ 179{
@@ -234,6 +276,10 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
234 /* The results should be identical */ 276 /* The results should be identical */
235 if (memcmp(buf1, buf2, len)) { 277 if (memcmp(buf1, buf2, len)) {
236 pr_debug("Bytes read differ from those read by objdump\n"); 278 pr_debug("Bytes read differ from those read by objdump\n");
279 pr_debug("buf1 (dso):\n");
280 dump_buf(buf1, len);
281 pr_debug("buf2 (objdump):\n");
282 dump_buf(buf2, len);
237 return -1; 283 return -1;
238 } 284 }
239 pr_debug("Bytes read match those read by objdump\n"); 285 pr_debug("Bytes read match those read by objdump\n");
@@ -427,7 +473,7 @@ static int do_test_code_reading(bool try_kcore)
427 symbol_conf.kallsyms_name = "/proc/kallsyms"; 473 symbol_conf.kallsyms_name = "/proc/kallsyms";
428 474
429 /* Load kernel map */ 475 /* Load kernel map */
430 map = machine->vmlinux_maps[MAP__FUNCTION]; 476 map = machine__kernel_map(machine);
431 ret = map__load(map, NULL); 477 ret = map__load(map, NULL);
432 if (ret < 0) { 478 if (ret < 0) {
433 pr_debug("map__load failed\n"); 479 pr_debug("map__load failed\n");
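For reference, the byte extraction that read_objdump_line() performs on each line of 'objdump -z -d' output can be sketched in a few lines of standalone Python 2 (the sample line is made up, and the C code additionally bounds the copy by the caller's buffer and section length):

    # Sketch: skip to the colon, then collect two-digit hex bytes until the
    # first token that is not a hex byte (the mnemonic) is reached.
    def parse_objdump_line(line):
        head, sep, rest = line.partition(':')
        if not sep:
            return None, []
        addr = int(head.strip(), 16)
        data = []
        for tok in rest.split():
            if len(tok) != 2 or any(c not in '0123456789abcdefABCDEF' for c in tok):
                break
            data.append(int(tok, 16))
        return addr, data

    # made-up example line
    print parse_objdump_line("  4004d6:\t55 48 89 e5             push   %rbp")
    # -> (4195542, [85, 72, 137, 229])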
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 40b36c462427..07221793a3ac 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -11,6 +11,10 @@
11#include "thread.h" 11#include "thread.h"
12#include "callchain.h" 12#include "callchain.h"
13 13
14#if defined (__x86_64__) || defined (__i386__)
15#include "arch-tests.h"
16#endif
17
14/* For bsearch. We try to unwind functions in shared object. */ 18/* For bsearch. We try to unwind functions in shared object. */
15#include <stdlib.h> 19#include <stdlib.h>
16 20
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 52162425c969..790e413d9a1f 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -1,3 +1,4 @@
1#include <linux/err.h>
1#include <traceevent/event-parse.h> 2#include <traceevent/event-parse.h>
2#include "evsel.h" 3#include "evsel.h"
3#include "tests.h" 4#include "tests.h"
@@ -36,8 +37,8 @@ int test__perf_evsel__tp_sched_test(void)
36 struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch"); 37 struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
37 int ret = 0; 38 int ret = 0;
38 39
39 if (evsel == NULL) { 40 if (IS_ERR(evsel)) {
40 pr_debug("perf_evsel__new\n"); 41 pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
41 return -1; 42 return -1;
42 } 43 }
43 44
@@ -66,6 +67,11 @@ int test__perf_evsel__tp_sched_test(void)
66 67
67 evsel = perf_evsel__newtp("sched", "sched_wakeup"); 68 evsel = perf_evsel__newtp("sched", "sched_wakeup");
68 69
70 if (IS_ERR(evsel)) {
71 pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
72 return -1;
73 }
74
69 if (perf_evsel__test_field(evsel, "comm", 16, true)) 75 if (perf_evsel__test_field(evsel, "comm", 16, true))
70 ret = -1; 76 ret = -1;
71 77
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index ce48775e6ada..818acf875dd0 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -16,30 +16,31 @@ struct sample {
16 struct thread *thread; 16 struct thread *thread;
17 struct map *map; 17 struct map *map;
18 struct symbol *sym; 18 struct symbol *sym;
19 int socket;
19}; 20};
20 21
21/* For the numbers, see hists_common.c */ 22/* For the numbers, see hists_common.c */
22static struct sample fake_samples[] = { 23static struct sample fake_samples[] = {
23 /* perf [kernel] schedule() */ 24 /* perf [kernel] schedule() */
24 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, 25 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, .socket = 0 },
25 /* perf [perf] main() */ 26 /* perf [perf] main() */
26 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, 27 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, .socket = 0 },
27 /* perf [libc] malloc() */ 28 /* perf [libc] malloc() */
28 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, 29 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, .socket = 0 },
29 /* perf [perf] main() */ 30 /* perf [perf] main() */
30 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, /* will be merged */ 31 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, .socket = 0 }, /* will be merged */
31 /* perf [perf] cmd_record() */ 32 /* perf [perf] cmd_record() */
32 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, }, 33 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, .socket = 1 },
33 /* perf [kernel] page_fault() */ 34 /* perf [kernel] page_fault() */
34 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, 35 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 1 },
35 /* bash [bash] main() */ 36 /* bash [bash] main() */
36 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, 37 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, .socket = 2 },
37 /* bash [bash] xmalloc() */ 38 /* bash [bash] xmalloc() */
38 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, 39 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, .socket = 2 },
39 /* bash [libc] malloc() */ 40 /* bash [libc] malloc() */
40 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, 41 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, .socket = 3 },
41 /* bash [kernel] page_fault() */ 42 /* bash [kernel] page_fault() */
42 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, 43 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 3 },
43}; 44};
44 45
45static int add_hist_entries(struct perf_evlist *evlist, 46static int add_hist_entries(struct perf_evlist *evlist,
@@ -83,6 +84,7 @@ static int add_hist_entries(struct perf_evlist *evlist,
83 &sample) < 0) 84 &sample) < 0)
84 goto out; 85 goto out;
85 86
87 al.socket = fake_samples[i].socket;
86 if (hist_entry_iter__add(&iter, &al, 88 if (hist_entry_iter__add(&iter, &al,
87 PERF_MAX_STACK_DEPTH, NULL) < 0) { 89 PERF_MAX_STACK_DEPTH, NULL) < 0) {
88 addr_location__put(&al); 90 addr_location__put(&al);
@@ -253,6 +255,39 @@ int test__hists_filter(void)
253 TEST_ASSERT_VAL("Unmatched total period for symbol filter", 255 TEST_ASSERT_VAL("Unmatched total period for symbol filter",
254 hists->stats.total_non_filtered_period == 300); 256 hists->stats.total_non_filtered_period == 300);
255 257
258 /* remove symbol filter first */
259 hists->symbol_filter_str = NULL;
260 hists__filter_by_symbol(hists);
261
262 /* now applying socket filters */
263 hists->socket_filter = 2;
264 hists__filter_by_socket(hists);
265
266 if (verbose > 2) {
267 pr_info("Histogram for socket filters\n");
268 print_hists_out(hists);
269 }
270
271 /* normal stats should be invariant */
272 TEST_ASSERT_VAL("Invalid nr samples",
273 hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
274 TEST_ASSERT_VAL("Invalid nr hist entries",
275 hists->nr_entries == 9);
276 TEST_ASSERT_VAL("Invalid total period",
277 hists->stats.total_period == 1000);
278
279 /* but filter stats are changed */
280 TEST_ASSERT_VAL("Unmatched nr samples for socket filter",
281 hists->stats.nr_non_filtered_samples == 2);
282 TEST_ASSERT_VAL("Unmatched nr hist entries for socket filter",
283 hists->nr_non_filtered_entries == 2);
284 TEST_ASSERT_VAL("Unmatched total period for socket filter",
285 hists->stats.total_non_filtered_period == 200);
286
287 /* remove socket filter first */
288 hists->socket_filter = -1;
289 hists__filter_by_socket(hists);
290
256 /* now applying all filters at once. */ 291 /* now applying all filters at once. */
257 hists->thread_filter = fake_samples[1].thread; 292 hists->thread_filter = fake_samples[1].thread;
258 hists->dso_filter = fake_samples[1].map->dso; 293 hists->dso_filter = fake_samples[1].map->dso;
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index ba31c4bd441d..2cbd0c6901e3 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -44,6 +44,7 @@ make_no_libnuma := NO_LIBNUMA=1
44make_no_libaudit := NO_LIBAUDIT=1 44make_no_libaudit := NO_LIBAUDIT=1
45make_no_libbionic := NO_LIBBIONIC=1 45make_no_libbionic := NO_LIBBIONIC=1
46make_no_auxtrace := NO_AUXTRACE=1 46make_no_auxtrace := NO_AUXTRACE=1
47make_no_libbpf := NO_LIBBPF=1
47make_tags := tags 48make_tags := tags
48make_cscope := cscope 49make_cscope := cscope
49make_help := help 50make_help := help
@@ -66,7 +67,7 @@ make_static := LDFLAGS=-static
66make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1 67make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
67make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1 68make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
68make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1 69make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
69make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 70make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
70 71
71# $(run) contains all available tests 72# $(run) contains all available tests
72run := make_pure 73run := make_pure
@@ -94,6 +95,7 @@ run += make_no_libnuma
94run += make_no_libaudit 95run += make_no_libaudit
95run += make_no_libbionic 96run += make_no_libbionic
96run += make_no_auxtrace 97run += make_no_auxtrace
98run += make_no_libbpf
97run += make_help 99run += make_help
98run += make_doc 100run += make_doc
99run += make_perf_o 101run += make_perf_o
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 666b67a4df9d..4495493c9431 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -3,6 +3,7 @@
3#include "thread_map.h" 3#include "thread_map.h"
4#include "cpumap.h" 4#include "cpumap.h"
5#include "tests.h" 5#include "tests.h"
6#include <linux/err.h>
6 7
7/* 8/*
8 * This test will generate random numbers of calls to some getpid syscalls, 9 * This test will generate random numbers of calls to some getpid syscalls,
@@ -65,7 +66,7 @@ int test__basic_mmap(void)
65 66
66 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 67 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
67 evsels[i] = perf_evsel__newtp("syscalls", name); 68 evsels[i] = perf_evsel__newtp("syscalls", name);
68 if (evsels[i] == NULL) { 69 if (IS_ERR(evsels[i])) {
69 pr_debug("perf_evsel__new\n"); 70 pr_debug("perf_evsel__new\n");
70 goto out_delete_evlist; 71 goto out_delete_evlist;
71 } 72 }
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index a572f87e9c8d..2006485a2859 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -1,3 +1,5 @@
1#include <api/fs/fs.h>
2#include <linux/err.h>
1#include "evsel.h" 3#include "evsel.h"
2#include "tests.h" 4#include "tests.h"
3#include "thread_map.h" 5#include "thread_map.h"
@@ -14,6 +16,7 @@ int test__openat_syscall_event_on_all_cpus(void)
14 cpu_set_t cpu_set; 16 cpu_set_t cpu_set;
15 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 17 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
16 char sbuf[STRERR_BUFSIZE]; 18 char sbuf[STRERR_BUFSIZE];
19 char errbuf[BUFSIZ];
17 20
18 if (threads == NULL) { 21 if (threads == NULL) {
19 pr_debug("thread_map__new\n"); 22 pr_debug("thread_map__new\n");
@@ -29,13 +32,9 @@ int test__openat_syscall_event_on_all_cpus(void)
29 CPU_ZERO(&cpu_set); 32 CPU_ZERO(&cpu_set);
30 33
31 evsel = perf_evsel__newtp("syscalls", "sys_enter_openat"); 34 evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
32 if (evsel == NULL) { 35 if (IS_ERR(evsel)) {
33 if (tracefs_configured()) 36 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
34 pr_debug("is tracefs mounted on /sys/kernel/tracing?\n"); 37 pr_debug("%s\n", errbuf);
35 else if (debugfs_configured())
36 pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
37 else
38 pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
39 goto out_thread_map_delete; 38 goto out_thread_map_delete;
40 } 39 }
41 40
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index 01a19626c846..5e811cd8f1c3 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -1,3 +1,4 @@
1#include <linux/err.h>
1#include "perf.h" 2#include "perf.h"
2#include "evlist.h" 3#include "evlist.h"
3#include "evsel.h" 4#include "evsel.h"
@@ -30,7 +31,7 @@ int test__syscall_openat_tp_fields(void)
30 } 31 }
31 32
32 evsel = perf_evsel__newtp("syscalls", "sys_enter_openat"); 33 evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
33 if (evsel == NULL) { 34 if (IS_ERR(evsel)) {
34 pr_debug("%s: perf_evsel__newtp\n", __func__); 35 pr_debug("%s: perf_evsel__newtp\n", __func__);
35 goto out_delete_evlist; 36 goto out_delete_evlist;
36 } 37 }
@@ -88,7 +89,7 @@ int test__syscall_openat_tp_fields(void)
88 89
89 err = perf_evsel__parse_sample(evsel, event, &sample); 90 err = perf_evsel__parse_sample(evsel, event, &sample);
90 if (err) { 91 if (err) {
91 pr_err("Can't parse sample, err = %d\n", err); 92 pr_debug("Can't parse sample, err = %d\n", err);
92 goto out_delete_evlist; 93 goto out_delete_evlist;
93 } 94 }
94 95
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
index c9a37bc6b33a..033b54797b8a 100644
--- a/tools/perf/tests/openat-syscall.c
+++ b/tools/perf/tests/openat-syscall.c
@@ -1,3 +1,5 @@
1#include <api/fs/tracing_path.h>
2#include <linux/err.h>
1#include "thread_map.h" 3#include "thread_map.h"
2#include "evsel.h" 4#include "evsel.h"
3#include "debug.h" 5#include "debug.h"
@@ -10,6 +12,7 @@ int test__openat_syscall_event(void)
10 unsigned int nr_openat_calls = 111, i; 12 unsigned int nr_openat_calls = 111, i;
11 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 13 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
12 char sbuf[STRERR_BUFSIZE]; 14 char sbuf[STRERR_BUFSIZE];
15 char errbuf[BUFSIZ];
13 16
14 if (threads == NULL) { 17 if (threads == NULL) {
15 pr_debug("thread_map__new\n"); 18 pr_debug("thread_map__new\n");
@@ -17,13 +20,9 @@ int test__openat_syscall_event(void)
17 } 20 }
18 21
19 evsel = perf_evsel__newtp("syscalls", "sys_enter_openat"); 22 evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
20 if (evsel == NULL) { 23 if (IS_ERR(evsel)) {
21 if (tracefs_configured()) 24 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
22 pr_debug("is tracefs mounted on /sys/kernel/tracing?\n"); 25 pr_debug("%s\n", errbuf);
23 else if (debugfs_configured())
24 pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
25 else
26 pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
27 goto out_thread_map_delete; 26 goto out_thread_map_delete;
28 } 27 }
29 28
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 9b6b2b6324a1..636d7b42d844 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -3,11 +3,11 @@
3#include "evsel.h" 3#include "evsel.h"
4#include "evlist.h" 4#include "evlist.h"
5#include <api/fs/fs.h> 5#include <api/fs/fs.h>
6#include <api/fs/tracefs.h>
7#include <api/fs/debugfs.h>
8#include "tests.h" 6#include "tests.h"
9#include "debug.h" 7#include "debug.h"
8#include "util.h"
10#include <linux/hw_breakpoint.h> 9#include <linux/hw_breakpoint.h>
10#include <api/fs/fs.h>
11 11
12#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \ 12#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
13 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD) 13 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
@@ -1260,25 +1260,24 @@ test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
1260 return test__checkevent_breakpoint_rw(evlist); 1260 return test__checkevent_breakpoint_rw(evlist);
1261} 1261}
1262 1262
1263static int test__checkevent_precise_max_modifier(struct perf_evlist *evlist)
1264{
1265 struct perf_evsel *evsel = perf_evlist__first(evlist);
1266
1267 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
1268 TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
1269 TEST_ASSERT_VAL("wrong config",
1270 PERF_COUNT_SW_TASK_CLOCK == evsel->attr.config);
1271 return 0;
1272}
1273
1263static int count_tracepoints(void) 1274static int count_tracepoints(void)
1264{ 1275{
1265 char events_path[PATH_MAX];
1266 struct dirent *events_ent; 1276 struct dirent *events_ent;
1267 const char *mountpoint;
1268 DIR *events_dir; 1277 DIR *events_dir;
1269 int cnt = 0; 1278 int cnt = 0;
1270 1279
1271 mountpoint = tracefs_find_mountpoint(); 1280 events_dir = opendir(tracing_events_path);
1272 if (mountpoint) {
1273 scnprintf(events_path, PATH_MAX, "%s/events",
1274 mountpoint);
1275 } else {
1276 mountpoint = debugfs_find_mountpoint();
1277 scnprintf(events_path, PATH_MAX, "%s/tracing/events",
1278 mountpoint);
1279 }
1280
1281 events_dir = opendir(events_path);
1282 1281
1283 TEST_ASSERT_VAL("Can't open events dir", events_dir); 1282 TEST_ASSERT_VAL("Can't open events dir", events_dir);
1284 1283
@@ -1295,7 +1294,7 @@ static int count_tracepoints(void)
1295 continue; 1294 continue;
1296 1295
1297 scnprintf(sys_path, PATH_MAX, "%s/%s", 1296 scnprintf(sys_path, PATH_MAX, "%s/%s",
1298 events_path, events_ent->d_name); 1297 tracing_events_path, events_ent->d_name);
1299 1298
1300 sys_dir = opendir(sys_path); 1299 sys_dir = opendir(sys_path);
1301 TEST_ASSERT_VAL("Can't open sys dir", sys_dir); 1300 TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
@@ -1575,6 +1574,11 @@ static struct evlist_test test__events[] = {
1575 .check = test__checkevent_exclude_idle_modifier_1, 1574 .check = test__checkevent_exclude_idle_modifier_1,
1576 .id = 46, 1575 .id = 46,
1577 }, 1576 },
1577 {
1578 .name = "task-clock:P,cycles",
1579 .check = test__checkevent_precise_max_modifier,
1580 .id = 47,
1581 },
1578}; 1582};
1579 1583
1580static struct evlist_test test__events_pmu[] = { 1584static struct evlist_test test__events_pmu[] = {
@@ -1750,6 +1754,17 @@ static int test_pmu_events(void)
1750 return ret; 1754 return ret;
1751} 1755}
1752 1756
1757static void debug_warn(const char *warn, va_list params)
1758{
1759 char msg[1024];
1760
1761 if (!verbose)
1762 return;
1763
1764 vsnprintf(msg, sizeof(msg), warn, params);
1765 fprintf(stderr, " Warning: %s\n", msg);
1766}
1767
1753int test__parse_events(void) 1768int test__parse_events(void)
1754{ 1769{
1755 int ret1, ret2 = 0; 1770 int ret1, ret2 = 0;
@@ -1761,6 +1776,8 @@ do { \
1761 ret2 = ret1; \ 1776 ret2 = ret1; \
1762} while (0) 1777} while (0)
1763 1778
1779 set_warning_routine(debug_warn);
1780
1764 TEST_EVENTS(test__events); 1781 TEST_EVENTS(test__events);
1765 1782
1766 if (test_pmu()) 1783 if (test_pmu())
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index bf113a247987..c80486969f83 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -24,13 +24,17 @@ enum {
24 TEST_SKIP = -2, 24 TEST_SKIP = -2,
25}; 25};
26 26
27struct test {
28 const char *desc;
29 int (*func)(void);
30};
31
27/* Tests */ 32/* Tests */
28int test__vmlinux_matches_kallsyms(void); 33int test__vmlinux_matches_kallsyms(void);
29int test__openat_syscall_event(void); 34int test__openat_syscall_event(void);
30int test__openat_syscall_event_on_all_cpus(void); 35int test__openat_syscall_event_on_all_cpus(void);
31int test__basic_mmap(void); 36int test__basic_mmap(void);
32int test__PERF_RECORD(void); 37int test__PERF_RECORD(void);
33int test__rdpmc(void);
34int test__perf_evsel__roundtrip_name_test(void); 38int test__perf_evsel__roundtrip_name_test(void);
35int test__perf_evsel__tp_sched_test(void); 39int test__perf_evsel__tp_sched_test(void);
36int test__syscall_openat_tp_fields(void); 40int test__syscall_openat_tp_fields(void);
@@ -46,7 +50,6 @@ int test__bp_signal(void);
46int test__bp_signal_overflow(void); 50int test__bp_signal_overflow(void);
47int test__task_exit(void); 51int test__task_exit(void);
48int test__sw_clock_freq(void); 52int test__sw_clock_freq(void);
49int test__perf_time_to_tsc(void);
50int test__code_reading(void); 53int test__code_reading(void);
51int test__sample_parsing(void); 54int test__sample_parsing(void);
52int test__keep_tracking(void); 55int test__keep_tracking(void);
@@ -63,8 +66,9 @@ int test__fdarray__add(void);
63int test__kmod_path__parse(void); 66int test__kmod_path__parse(void);
64int test__thread_map(void); 67int test__thread_map(void);
65int test__llvm(void); 68int test__llvm(void);
69int test_session_topology(void);
66 70
67#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__) 71#if defined(__arm__) || defined(__aarch64__)
68#ifdef HAVE_DWARF_UNWIND_SUPPORT 72#ifdef HAVE_DWARF_UNWIND_SUPPORT
69struct thread; 73struct thread;
70struct perf_sample; 74struct perf_sample;
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
new file mode 100644
index 000000000000..f5bb096c3bd9
--- /dev/null
+++ b/tools/perf/tests/topology.c
@@ -0,0 +1,115 @@
1#include <string.h>
2#include <stdlib.h>
3#include <stdio.h>
4#include "tests.h"
5#include "util.h"
6#include "session.h"
7#include "evlist.h"
8#include "debug.h"
9
10#define TEMPL "/tmp/perf-test-XXXXXX"
11#define DATA_SIZE 10
12
13static int get_temp(char *path)
14{
15 int fd;
16
17 strcpy(path, TEMPL);
18
19 fd = mkstemp(path);
20 if (fd < 0) {
21 perror("mkstemp failed");
22 return -1;
23 }
24
25 close(fd);
26 return 0;
27}
28
29static int session_write_header(char *path)
30{
31 struct perf_session *session;
32 struct perf_data_file file = {
33 .path = path,
34 .mode = PERF_DATA_MODE_WRITE,
35 };
36
37 session = perf_session__new(&file, false, NULL);
38 TEST_ASSERT_VAL("can't get session", session);
39
40 session->evlist = perf_evlist__new_default();
41 TEST_ASSERT_VAL("can't get evlist", session->evlist);
42
43 perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
44 perf_header__set_feat(&session->header, HEADER_NRCPUS);
45
46 session->header.data_size += DATA_SIZE;
47
48 TEST_ASSERT_VAL("failed to write header",
49 !perf_session__write_header(session, session->evlist, file.fd, true));
50
51 perf_session__delete(session);
52
53 return 0;
54}
55
56static int check_cpu_topology(char *path, struct cpu_map *map)
57{
58 struct perf_session *session;
59 struct perf_data_file file = {
60 .path = path,
61 .mode = PERF_DATA_MODE_READ,
62 };
63 int i;
64
65 session = perf_session__new(&file, false, NULL);
66 TEST_ASSERT_VAL("can't get session", session);
67
68 for (i = 0; i < session->header.env.nr_cpus_online; i++) {
69 pr_debug("CPU %d, core %d, socket %d\n", i,
70 session->header.env.cpu[i].core_id,
71 session->header.env.cpu[i].socket_id);
72 }
73
74 for (i = 0; i < map->nr; i++) {
75 TEST_ASSERT_VAL("Core ID doesn't match",
76 (session->header.env.cpu[map->map[i]].core_id == (cpu_map__get_core(map, i, NULL) & 0xffff)));
77
78 TEST_ASSERT_VAL("Socket ID doesn't match",
79 (session->header.env.cpu[map->map[i]].socket_id == cpu_map__get_socket(map, i, NULL)));
80 }
81
82 perf_session__delete(session);
83
84 return 0;
85}
86
87int test_session_topology(void)
88{
89 char path[PATH_MAX];
90 struct cpu_map *map;
91 int ret = -1;
92
93 TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
94
95 pr_debug("templ file: %s\n", path);
96
97 if (session_write_header(path))
98 goto free_path;
99
100 map = cpu_map__new(NULL);
101 if (map == NULL) {
102 pr_debug("failed to get system cpumap\n");
103 goto free_path;
104 }
105
106 if (check_cpu_topology(path, map))
107 goto free_map;
108 ret = 0;
109
110free_map:
111 cpu_map__put(map);
112free_path:
113 unlink(path);
114 return ret;
115}
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index b34c5fc829ae..d677e018e504 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -68,7 +68,7 @@ int test__vmlinux_matches_kallsyms(void)
68 * to see if the running kernel was relocated by checking if it has the 68 * to see if the running kernel was relocated by checking if it has the
69 * same value in the vmlinux file we load. 69 * same value in the vmlinux file we load.
70 */ 70 */
71 kallsyms_map = machine__kernel_map(&kallsyms, type); 71 kallsyms_map = machine__kernel_map(&kallsyms);
72 72
73 /* 73 /*
74 * Step 5: 74 * Step 5:
@@ -80,7 +80,7 @@ int test__vmlinux_matches_kallsyms(void)
80 goto out; 80 goto out;
81 } 81 }
82 82
83 vmlinux_map = machine__kernel_map(&vmlinux, type); 83 vmlinux_map = machine__kernel_map(&vmlinux);
84 84
85 /* 85 /*
86 * Step 6: 86 * Step 6:
diff --git a/tools/perf/trace/strace/groups/file b/tools/perf/trace/strace/groups/file
index 62378a899d79..722e25d200bf 100644
--- a/tools/perf/trace/strace/groups/file
+++ b/tools/perf/trace/strace/groups/file
@@ -9,6 +9,7 @@ mkdir
9open 9open
10openat 10openat
11quotactl 11quotactl
12read
12readlink 13readlink
13rename 14rename
14rmdir 15rmdir
@@ -16,3 +17,4 @@ stat
16statfs 17statfs
17symlink 18symlink
18unlink 19unlink
20write
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index c6c7e5189214..e9703c0829f1 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -393,6 +393,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
393 393
394 if (browser->use_navkeypressed && !browser->navkeypressed) { 394 if (browser->use_navkeypressed && !browser->navkeypressed) {
395 if (key == K_DOWN || key == K_UP || 395 if (key == K_DOWN || key == K_UP ||
396 (browser->columns && (key == K_LEFT || key == K_RIGHT)) ||
396 key == K_PGDN || key == K_PGUP || 397 key == K_PGDN || key == K_PGUP ||
397 key == K_HOME || key == K_END || 398 key == K_HOME || key == K_END ||
398 key == ' ') { 399 key == ' ') {
@@ -421,6 +422,18 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
421 browser->seek(browser, -1, SEEK_CUR); 422 browser->seek(browser, -1, SEEK_CUR);
422 } 423 }
423 break; 424 break;
425 case K_RIGHT:
426 if (!browser->columns)
427 goto out;
428 if (browser->horiz_scroll < browser->columns - 1)
429 ++browser->horiz_scroll;
430 break;
431 case K_LEFT:
432 if (!browser->columns)
433 goto out;
434 if (browser->horiz_scroll != 0)
435 --browser->horiz_scroll;
436 break;
424 case K_PGDN: 437 case K_PGDN:
425 case ' ': 438 case ' ':
426 if (browser->top_idx + browser->rows > browser->nr_entries - 1) 439 if (browser->top_idx + browser->rows > browser->nr_entries - 1)
@@ -459,6 +472,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
459 browser->seek(browser, -offset, SEEK_END); 472 browser->seek(browser, -offset, SEEK_END);
460 break; 473 break;
461 default: 474 default:
475 out:
462 return key; 476 return key;
463 } 477 }
464 } 478 }
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index f3cef564de02..01781de59532 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -14,7 +14,7 @@
14struct ui_browser { 14struct ui_browser {
15 u64 index, top_idx; 15 u64 index, top_idx;
16 void *top, *entries; 16 void *top, *entries;
17 u16 y, x, width, height, rows; 17 u16 y, x, width, height, rows, columns, horiz_scroll;
18 int current_color; 18 int current_color;
19 void *priv; 19 void *priv;
20 const char *title; 20 const char *title;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 29739b347599..d4d7cc27252f 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -768,8 +768,8 @@ static int annotate_browser__run(struct annotate_browser *browser,
768 "UP/DOWN/PGUP\n" 768 "UP/DOWN/PGUP\n"
769 "PGDN/SPACE Navigate\n" 769 "PGDN/SPACE Navigate\n"
770 "q/ESC/CTRL+C Exit\n\n" 770 "q/ESC/CTRL+C Exit\n\n"
771 "-> Go to target\n" 771 "ENTER Go to target\n"
772 "<- Exit\n" 772 "ESC Exit\n"
773 "H Cycle thru hottest instructions\n" 773 "H Cycle thru hottest instructions\n"
774 "j Toggle showing jump to target arrows\n" 774 "j Toggle showing jump to target arrows\n"
775 "J Toggle showing number of jump sources on targets\n" 775 "J Toggle showing number of jump sources on targets\n"
@@ -1056,7 +1056,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
1056 goto out_free_offsets; 1056 goto out_free_offsets;
1057 } 1057 }
1058 1058
1059 ui_helpline__push("Press <- or ESC to exit"); 1059 ui_helpline__push("Press ESC to exit");
1060 1060
1061 notes = symbol__annotation(sym); 1061 notes = symbol__annotation(sym);
1062 browser.start = map__rip_2objdump(map, sym->start); 1062 browser.start = map__rip_2objdump(map, sym->start);
@@ -1125,8 +1125,8 @@ static struct annotate_config {
1125 ANNOTATE_CFG(jump_arrows), 1125 ANNOTATE_CFG(jump_arrows),
1126 ANNOTATE_CFG(show_linenr), 1126 ANNOTATE_CFG(show_linenr),
1127 ANNOTATE_CFG(show_nr_jumps), 1127 ANNOTATE_CFG(show_nr_jumps),
1128 ANNOTATE_CFG(use_offset),
1129 ANNOTATE_CFG(show_total_period), 1128 ANNOTATE_CFG(show_total_period),
1129 ANNOTATE_CFG(use_offset),
1130}; 1130};
1131 1131
1132#undef ANNOTATE_CFG 1132#undef ANNOTATE_CFG
@@ -1152,9 +1152,9 @@ static int annotate__config(const char *var, const char *value,
1152 sizeof(struct annotate_config), annotate_config__cmp); 1152 sizeof(struct annotate_config), annotate_config__cmp);
1153 1153
1154 if (cfg == NULL) 1154 if (cfg == NULL)
1155 return -1; 1155 ui__warning("%s variable unknown, ignoring...", var);
1156 1156 else
1157 *cfg->value = perf_config_bool(name, value); 1157 *cfg->value = perf_config_bool(name, value);
1158 return 0; 1158 return 0;
1159} 1159}
1160 1160
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index c04c60d4863c..e5afb8936040 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -784,11 +784,12 @@ static int hist_browser__show_entry(struct hist_browser *browser,
784 .size = sizeof(s), 784 .size = sizeof(s),
785 .ptr = &arg, 785 .ptr = &arg,
786 }; 786 };
787 int column = 0;
787 788
788 hist_browser__gotorc(browser, row, 0); 789 hist_browser__gotorc(browser, row, 0);
789 790
790 perf_hpp__for_each_format(fmt) { 791 perf_hpp__for_each_format(fmt) {
791 if (perf_hpp__should_skip(fmt)) 792 if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
792 continue; 793 continue;
793 794
794 if (current_entry && browser->b.navkeypressed) { 795 if (current_entry && browser->b.navkeypressed) {
@@ -861,14 +862,16 @@ static int advance_hpp_check(struct perf_hpp *hpp, int inc)
861 return hpp->size <= 0; 862 return hpp->size <= 0;
862} 863}
863 864
864static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists) 865static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *buf, size_t size)
865{ 866{
867 struct hists *hists = browser->hists;
866 struct perf_hpp dummy_hpp = { 868 struct perf_hpp dummy_hpp = {
867 .buf = buf, 869 .buf = buf,
868 .size = size, 870 .size = size,
869 }; 871 };
870 struct perf_hpp_fmt *fmt; 872 struct perf_hpp_fmt *fmt;
871 size_t ret = 0; 873 size_t ret = 0;
874 int column = 0;
872 875
873 if (symbol_conf.use_callchain) { 876 if (symbol_conf.use_callchain) {
874 ret = scnprintf(buf, size, " "); 877 ret = scnprintf(buf, size, " ");
@@ -877,7 +880,7 @@ static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
877 } 880 }
878 881
879 perf_hpp__for_each_format(fmt) { 882 perf_hpp__for_each_format(fmt) {
880 if (perf_hpp__should_skip(fmt)) 883 if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
881 continue; 884 continue;
882 885
883 ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists)); 886 ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
@@ -896,7 +899,7 @@ static void hist_browser__show_headers(struct hist_browser *browser)
896{ 899{
897 char headers[1024]; 900 char headers[1024];
898 901
899 hists__scnprintf_headers(headers, sizeof(headers), browser->hists); 902 hists_browser__scnprintf_headers(browser, headers, sizeof(headers));
900 ui_browser__gotorc(&browser->b, 0, 0); 903 ui_browser__gotorc(&browser->b, 0, 0);
901 ui_browser__set_color(&browser->b, HE_COLORSET_ROOT); 904 ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
902 ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1); 905 ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
@@ -1261,6 +1264,7 @@ static int hists__browser_title(struct hists *hists,
1261 int printed; 1264 int printed;
1262 const struct dso *dso = hists->dso_filter; 1265 const struct dso *dso = hists->dso_filter;
1263 const struct thread *thread = hists->thread_filter; 1266 const struct thread *thread = hists->thread_filter;
1267 int socket_id = hists->socket_filter;
1264 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; 1268 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
1265 u64 nr_events = hists->stats.total_period; 1269 u64 nr_events = hists->stats.total_period;
1266 struct perf_evsel *evsel = hists_to_evsel(hists); 1270 struct perf_evsel *evsel = hists_to_evsel(hists);
@@ -1314,6 +1318,9 @@ static int hists__browser_title(struct hists *hists,
1314 if (dso) 1318 if (dso)
1315 printed += scnprintf(bf + printed, size - printed, 1319 printed += scnprintf(bf + printed, size - printed,
1316 ", DSO: %s", dso->short_name); 1320 ", DSO: %s", dso->short_name);
1321 if (socket_id > -1)
1322 printed += scnprintf(bf + printed, size - printed,
1323 ", Processor Socket: %d", socket_id);
1317 if (!is_report_browser(hbt)) { 1324 if (!is_report_browser(hbt)) {
1318 struct perf_top *top = hbt->arg; 1325 struct perf_top *top = hbt->arg;
1319 1326
@@ -1425,6 +1432,7 @@ struct popup_action {
1425 struct thread *thread; 1432 struct thread *thread;
1426 struct dso *dso; 1433 struct dso *dso;
1427 struct map_symbol ms; 1434 struct map_symbol ms;
1435 int socket;
1428 1436
1429 int (*fn)(struct hist_browser *browser, struct popup_action *act); 1437 int (*fn)(struct hist_browser *browser, struct popup_action *act);
1430}; 1438};
@@ -1437,7 +1445,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
1437 struct hist_entry *he; 1445 struct hist_entry *he;
1438 int err; 1446 int err;
1439 1447
1440 if (!objdump_path && perf_session_env__lookup_objdump(browser->env)) 1448 if (!objdump_path && perf_env__lookup_objdump(browser->env))
1441 return 0; 1449 return 0;
1442 1450
1443 notes = symbol__annotation(act->ms.sym); 1451 notes = symbol__annotation(act->ms.sym);
@@ -1488,7 +1496,7 @@ do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
1488 thread__zput(browser->hists->thread_filter); 1496 thread__zput(browser->hists->thread_filter);
1489 ui_helpline__pop(); 1497 ui_helpline__pop();
1490 } else { 1498 } else {
1491 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"", 1499 ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
1492 thread->comm_set ? thread__comm_str(thread) : "", 1500 thread->comm_set ? thread__comm_str(thread) : "",
1493 thread->tid); 1501 thread->tid);
1494 browser->hists->thread_filter = thread__get(thread); 1502 browser->hists->thread_filter = thread__get(thread);
@@ -1522,7 +1530,7 @@ add_thread_opt(struct hist_browser *browser, struct popup_action *act,
1522static int 1530static int
1523do_zoom_dso(struct hist_browser *browser, struct popup_action *act) 1531do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
1524{ 1532{
1525 struct dso *dso = act->dso; 1533 struct map *map = act->ms.map;
1526 1534
1527 if (browser->hists->dso_filter) { 1535 if (browser->hists->dso_filter) {
1528 pstack__remove(browser->pstack, &browser->hists->dso_filter); 1536 pstack__remove(browser->pstack, &browser->hists->dso_filter);
@@ -1530,11 +1538,11 @@ do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
1530 browser->hists->dso_filter = NULL; 1538 browser->hists->dso_filter = NULL;
1531 ui_helpline__pop(); 1539 ui_helpline__pop();
1532 } else { 1540 } else {
1533 if (dso == NULL) 1541 if (map == NULL)
1534 return 0; 1542 return 0;
1535 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"", 1543 ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s DSO\"",
1536 dso->kernel ? "the Kernel" : dso->short_name); 1544 __map__is_kernel(map) ? "the Kernel" : map->dso->short_name);
1537 browser->hists->dso_filter = dso; 1545 browser->hists->dso_filter = map->dso;
1538 perf_hpp__set_elide(HISTC_DSO, true); 1546 perf_hpp__set_elide(HISTC_DSO, true);
1539 pstack__push(browser->pstack, &browser->hists->dso_filter); 1547 pstack__push(browser->pstack, &browser->hists->dso_filter);
1540 } 1548 }
@@ -1546,17 +1554,18 @@ do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
1546 1554
1547static int 1555static int
1548add_dso_opt(struct hist_browser *browser, struct popup_action *act, 1556add_dso_opt(struct hist_browser *browser, struct popup_action *act,
1549 char **optstr, struct dso *dso) 1557 char **optstr, struct map *map)
1550{ 1558{
1551 if (dso == NULL) 1559 if (map == NULL)
1552 return 0; 1560 return 0;
1553 1561
1554 if (asprintf(optstr, "Zoom %s %s DSO", 1562 if (asprintf(optstr, "Zoom %s %s DSO",
1555 browser->hists->dso_filter ? "out of" : "into", 1563 browser->hists->dso_filter ? "out of" : "into",
1556 dso->kernel ? "the Kernel" : dso->short_name) < 0) 1564 __map__is_kernel(map) ? "the Kernel" : map->dso->short_name) < 0)
1557 return 0; 1565 return 0;
1558 1566
1559 act->dso = dso; 1567 act->ms.map = map;
1568 act->dso = map->dso;
1560 act->fn = do_zoom_dso; 1569 act->fn = do_zoom_dso;
1561 return 1; 1570 return 1;
1562} 1571}
@@ -1672,6 +1681,41 @@ add_exit_opt(struct hist_browser *browser __maybe_unused,
1672 return 1; 1681 return 1;
1673} 1682}
1674 1683
1684static int
1685do_zoom_socket(struct hist_browser *browser, struct popup_action *act)
1686{
1687 if (browser->hists->socket_filter > -1) {
1688 pstack__remove(browser->pstack, &browser->hists->socket_filter);
1689 browser->hists->socket_filter = -1;
1690 perf_hpp__set_elide(HISTC_SOCKET, false);
1691 } else {
1692 browser->hists->socket_filter = act->socket;
1693 perf_hpp__set_elide(HISTC_SOCKET, true);
1694 pstack__push(browser->pstack, &browser->hists->socket_filter);
1695 }
1696
1697 hists__filter_by_socket(browser->hists);
1698 hist_browser__reset(browser);
1699 return 0;
1700}
1701
1702static int
1703add_socket_opt(struct hist_browser *browser, struct popup_action *act,
1704 char **optstr, int socket_id)
1705{
1706 if (socket_id < 0)
1707 return 0;
1708
1709 if (asprintf(optstr, "Zoom %s Processor Socket %d",
1710 (browser->hists->socket_filter > -1) ? "out of" : "into",
1711 socket_id) < 0)
1712 return 0;
1713
1714 act->socket = socket_id;
1715 act->fn = do_zoom_socket;
1716 return 1;
1717}
1718
1675static void hist_browser__update_nr_entries(struct hist_browser *hb) 1719static void hist_browser__update_nr_entries(struct hist_browser *hb)
1676{ 1720{
1677 u64 nr_entries = 0; 1721 u64 nr_entries = 0;
@@ -1717,14 +1761,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1717 "For multiple event sessions:\n\n" \ 1761 "For multiple event sessions:\n\n" \
1718 "TAB/UNTAB Switch events\n\n" \ 1762 "TAB/UNTAB Switch events\n\n" \
1719 "For symbolic views (--sort has sym):\n\n" \ 1763 "For symbolic views (--sort has sym):\n\n" \
1720 "-> Zoom into DSO/Threads & Annotate current symbol\n" \ 1764 "ENTER Zoom into DSO/Threads & Annotate current symbol\n" \
1721 "<- Zoom out\n" \ 1765 "ESC Zoom out\n" \
1722 "a Annotate current symbol\n" \ 1766 "a Annotate current symbol\n" \
1723 "C Collapse all callchains\n" \ 1767 "C Collapse all callchains\n" \
1724 "d Zoom into current DSO\n" \ 1768 "d Zoom into current DSO\n" \
1725 "E Expand all callchains\n" \ 1769 "E Expand all callchains\n" \
1726 "F Toggle percentage of filtered entries\n" \ 1770 "F Toggle percentage of filtered entries\n" \
1727 "H Display column headers\n" \ 1771 "H Display column headers\n" \
1772 "m Display context menu\n" \
1773 "S Zoom into current Processor Socket\n" \
1728 1774
1729 /* help messages are sorted by lexical order of the hotkey */ 1775 /* help messages are sorted by lexical order of the hotkey */
1730 const char report_help[] = HIST_BROWSER_HELP_COMMON 1776 const char report_help[] = HIST_BROWSER_HELP_COMMON
@@ -1755,7 +1801,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1755 hist_browser__update_nr_entries(browser); 1801 hist_browser__update_nr_entries(browser);
1756 } 1802 }
1757 1803
1758 browser->pstack = pstack__new(2); 1804 browser->pstack = pstack__new(3);
1759 if (browser->pstack == NULL) 1805 if (browser->pstack == NULL)
1760 goto out; 1806 goto out;
1761 1807
@@ -1764,8 +1810,17 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1764 memset(options, 0, sizeof(options)); 1810 memset(options, 0, sizeof(options));
1765 memset(actions, 0, sizeof(actions)); 1811 memset(actions, 0, sizeof(actions));
1766 1812
1767 perf_hpp__for_each_format(fmt) 1813 perf_hpp__for_each_format(fmt) {
1768 perf_hpp__reset_width(fmt, hists); 1814 perf_hpp__reset_width(fmt, hists);
1815 /*
1816 * This is done just once, and activates the horizontal scrolling
 1817 * code in the ui_browser code. It would be better to keep the
 1818 * counter in the perf_hpp code, but doing it here works for
 1819 * now; FIXME: set this in hist_browser__new() instead of
 1820 * being clever here 8-)
1821 */
1822 ++browser->b.columns;
1823 }
1769 1824
1770 if (symbol_conf.col_width_list_str) 1825 if (symbol_conf.col_width_list_str)
1771 perf_hpp__set_user_width(symbol_conf.col_width_list_str); 1826 perf_hpp__set_user_width(symbol_conf.col_width_list_str);
@@ -1773,7 +1828,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1773 while (1) { 1828 while (1) {
1774 struct thread *thread = NULL; 1829 struct thread *thread = NULL;
1775 struct dso *dso = NULL; 1830 struct dso *dso = NULL;
1831 struct map *map = NULL;
1776 int choice = 0; 1832 int choice = 0;
1833 int socked_id = -1;
1777 1834
1778 nr_options = 0; 1835 nr_options = 0;
1779 1836
@@ -1781,7 +1838,10 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1781 1838
1782 if (browser->he_selection != NULL) { 1839 if (browser->he_selection != NULL) {
1783 thread = hist_browser__selected_thread(browser); 1840 thread = hist_browser__selected_thread(browser);
1784 dso = browser->selection->map ? browser->selection->map->dso : NULL; 1841 map = browser->selection->map;
1842 if (map)
1843 dso = map->dso;
1844 socked_id = browser->he_selection->socket;
1785 } 1845 }
1786 switch (key) { 1846 switch (key) {
1787 case K_TAB: 1847 case K_TAB:
@@ -1824,9 +1884,14 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1824 actions->thread = thread; 1884 actions->thread = thread;
1825 do_zoom_thread(browser, actions); 1885 do_zoom_thread(browser, actions);
1826 continue; 1886 continue;
1887 case 'S':
1888 actions->socket = socked_id;
1889 do_zoom_socket(browser, actions);
1890 continue;
1827 case '/': 1891 case '/':
1828 if (ui_browser__input_window("Symbol to show", 1892 if (ui_browser__input_window("Symbol to show",
1829 "Please enter the name of symbol you want to see", 1893 "Please enter the name of symbol you want to see.\n"
1894 "To remove the filter later, press / + ENTER.",
1830 buf, "ENTER: OK, ESC: Cancel", 1895 buf, "ENTER: OK, ESC: Cancel",
1831 delay_secs * 2) == K_ENTER) { 1896 delay_secs * 2) == K_ENTER) {
1832 hists->symbol_filter_str = *buf ? buf : NULL; 1897 hists->symbol_filter_str = *buf ? buf : NULL;
@@ -1871,6 +1936,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1871 continue; 1936 continue;
1872 case K_ENTER: 1937 case K_ENTER:
1873 case K_RIGHT: 1938 case K_RIGHT:
1939 case 'm':
1874 /* menu */ 1940 /* menu */
1875 break; 1941 break;
1876 case K_ESC: 1942 case K_ESC:
@@ -1899,9 +1965,11 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1899 * Ditto for thread below. 1965 * Ditto for thread below.
1900 */ 1966 */
1901 do_zoom_dso(browser, actions); 1967 do_zoom_dso(browser, actions);
1902 } 1968 } else if (top == &browser->hists->thread_filter) {
1903 if (top == &browser->hists->thread_filter)
1904 do_zoom_thread(browser, actions); 1969 do_zoom_thread(browser, actions);
1970 } else if (top == &browser->hists->socket_filter) {
1971 do_zoom_socket(browser, actions);
1972 }
1905 continue; 1973 continue;
1906 } 1974 }
1907 case 'q': 1975 case 'q':
@@ -1965,12 +2033,14 @@ skip_annotation:
1965 nr_options += add_thread_opt(browser, &actions[nr_options], 2033 nr_options += add_thread_opt(browser, &actions[nr_options],
1966 &options[nr_options], thread); 2034 &options[nr_options], thread);
1967 nr_options += add_dso_opt(browser, &actions[nr_options], 2035 nr_options += add_dso_opt(browser, &actions[nr_options],
1968 &options[nr_options], dso); 2036 &options[nr_options], map);
1969 nr_options += add_map_opt(browser, &actions[nr_options], 2037 nr_options += add_map_opt(browser, &actions[nr_options],
1970 &options[nr_options], 2038 &options[nr_options],
1971 browser->selection ? 2039 browser->selection ?
1972 browser->selection->map : NULL); 2040 browser->selection->map : NULL);
1973 2041 nr_options += add_socket_opt(browser, &actions[nr_options],
2042 &options[nr_options],
2043 socked_id);
1974 /* perf script support */ 2044 /* perf script support */
1975 if (browser->he_selection) { 2045 if (browser->he_selection) {
1976 nr_options += add_script_opt(browser, 2046 nr_options += add_script_opt(browser,
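The hists browser changes above implement horizontal scrolling by counting output columns while iterating the hpp formats and skipping the first browser->b.horiz_scroll of them, for both the header line and each entry row. A minimal, self-contained C sketch of that skip pattern follows; the column names and widths are made up for illustration, this is not perf's actual rendering code:

#include <stdio.h>

#define NR_COLS 6

static const char *cols[NR_COLS] = {
        "Overhead", "Command", "Shared Object", "Symbol", "CPU", "Socket"
};

static void render_header(int horiz_scroll)
{
        int column = 0, i;

        for (i = 0; i < NR_COLS; i++) {
                if (column++ < horiz_scroll)    /* same skip test as the patch */
                        continue;
                printf("%-16s", cols[i]);
        }
        putchar('\n');
}

int main(void)
{
        render_header(0);       /* all columns visible        */
        render_header(2);       /* view scrolled right by two */
        return 0;
}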
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index 8c154c7d4669..80912778bb6d 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -72,7 +72,7 @@ static int map_browser__run(struct map_browser *browser)
72 int key; 72 int key;
73 73
74 if (ui_browser__show(&browser->b, browser->map->dso->long_name, 74 if (ui_browser__show(&browser->b, browser->map->dso->long_name,
75 "Press <- or ESC to exit, %s / to search", 75 "Press ESC to exit, %s / to search",
76 verbose ? "" : "restart with -v to use") < 0) 76 verbose ? "" : "restart with -v to use") < 0)
77 return -1; 77 return -1;
78 78
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index e13b48d705ef..ad6b6ee3770e 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -89,7 +89,7 @@ static int script_browser__run(struct perf_script_browser *browser)
89 int key; 89 int key;
90 90
91 if (ui_browser__show(&browser->b, browser->script_name, 91 if (ui_browser__show(&browser->b, browser->script_name,
92 "Press <- or ESC to exit") < 0) 92 "Press ESC to exit") < 0)
93 return -1; 93 return -1;
94 94
95 while (1) { 95 while (1) {
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 25d608394d74..5029ba2b55af 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -463,27 +463,27 @@ void perf_hpp__init(void)
463 return; 463 return;
464 464
465 if (symbol_conf.cumulate_callchain) { 465 if (symbol_conf.cumulate_callchain) {
466 perf_hpp__column_enable(PERF_HPP__OVERHEAD_ACC); 466 hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
467 perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self"; 467 perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
468 } 468 }
469 469
470 perf_hpp__column_enable(PERF_HPP__OVERHEAD); 470 hpp_dimension__add_output(PERF_HPP__OVERHEAD);
471 471
472 if (symbol_conf.show_cpu_utilization) { 472 if (symbol_conf.show_cpu_utilization) {
473 perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS); 473 hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
474 perf_hpp__column_enable(PERF_HPP__OVERHEAD_US); 474 hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);
475 475
476 if (perf_guest) { 476 if (perf_guest) {
477 perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS); 477 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
478 perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US); 478 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
479 } 479 }
480 } 480 }
481 481
482 if (symbol_conf.show_nr_samples) 482 if (symbol_conf.show_nr_samples)
483 perf_hpp__column_enable(PERF_HPP__SAMPLES); 483 hpp_dimension__add_output(PERF_HPP__SAMPLES);
484 484
485 if (symbol_conf.show_total_period) 485 if (symbol_conf.show_total_period)
486 perf_hpp__column_enable(PERF_HPP__PERIOD); 486 hpp_dimension__add_output(PERF_HPP__PERIOD);
487 487
 488 /* prepend overhead field for backward compatibility. */ 488 /* prepend overhead field for backward compatibility. */
489 list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list; 489 list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 60d1f29b4b50..7dfeba0a91f3 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -141,10 +141,6 @@ int ui__init(void)
141 141
142 SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB); 142 SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
143 143
144 ui_helpline__init();
145 ui_browser__init();
146 tui_progress__init();
147
148 signal(SIGSEGV, ui__signal_backtrace); 144 signal(SIGSEGV, ui__signal_backtrace);
149 signal(SIGFPE, ui__signal_backtrace); 145 signal(SIGFPE, ui__signal_backtrace);
150 signal(SIGINT, ui__signal); 146 signal(SIGINT, ui__signal);
@@ -153,6 +149,10 @@ int ui__init(void)
153 149
154 perf_error__register(&perf_tui_eops); 150 perf_error__register(&perf_tui_eops);
155 151
152 ui_helpline__init();
153 ui_browser__init();
154 tui_progress__init();
155
156 hist_browser__init_hpp(); 156 hist_browser__init_hpp();
157out: 157out:
158 return err; 158 return err;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index e5f18a288b74..591b3fe3ed49 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -5,6 +5,7 @@ libperf-y += build-id.o
5libperf-y += config.o 5libperf-y += config.o
6libperf-y += ctype.o 6libperf-y += ctype.o
7libperf-y += db-export.o 7libperf-y += db-export.o
8libperf-y += env.o
8libperf-y += environment.o 9libperf-y += environment.o
9libperf-y += event.o 10libperf-y += event.o
10libperf-y += evlist.o 11libperf-y += evlist.o
@@ -86,6 +87,7 @@ libperf-$(CONFIG_AUXTRACE) += intel-bts.o
86libperf-y += parse-branch-options.o 87libperf-y += parse-branch-options.o
87libperf-y += parse-regs-options.o 88libperf-y += parse-regs-options.o
88 89
90libperf-$(CONFIG_LIBBPF) += bpf-loader.o
89libperf-$(CONFIG_LIBELF) += symbol-elf.o 91libperf-$(CONFIG_LIBELF) += symbol-elf.o
90libperf-$(CONFIG_LIBELF) += probe-file.o 92libperf-$(CONFIG_LIBELF) += probe-file.o
91libperf-$(CONFIG_LIBELF) += probe-event.o 93libperf-$(CONFIG_LIBELF) += probe-event.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index d1eece70b84d..0fc8d7a2fea5 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -548,8 +548,11 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
548 548
549 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); 549 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
550 550
551 if (addr < sym->start || addr >= sym->end) 551 if (addr < sym->start || addr >= sym->end) {
552 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
553 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
552 return -ERANGE; 554 return -ERANGE;
555 }
553 556
554 offset = addr - sym->start; 557 offset = addr - sym->start;
555 h = annotation__histogram(notes, evidx); 558 h = annotation__histogram(notes, evidx);
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index e9996092a093..cea323d9ee7e 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -122,7 +122,7 @@ struct annotated_source {
122 struct list_head source; 122 struct list_head source;
123 struct source_line *lines; 123 struct source_line *lines;
124 int nr_histograms; 124 int nr_histograms;
125 int sizeof_sym_hist; 125 size_t sizeof_sym_hist;
126 struct cyc_hist *cycles_hist; 126 struct cyc_hist *cycles_hist;
127 struct sym_hist histograms[0]; 127 struct sym_hist histograms[0];
128}; 128};
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a980e7c50ee0..7f10430af39c 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -926,6 +926,8 @@ s64 perf_event__process_auxtrace(struct perf_tool *tool,
926#define PERF_ITRACE_DEFAULT_PERIOD 100000 926#define PERF_ITRACE_DEFAULT_PERIOD 100000
927#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16 927#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16
928#define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024 928#define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024
929#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64
930#define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024
929 931
930void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts) 932void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
931{ 933{
@@ -936,6 +938,7 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
936 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE; 938 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
937 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; 939 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
938 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ; 940 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
941 synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
939} 942}
940 943
941/* 944/*
@@ -950,6 +953,7 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
950 const char *p; 953 const char *p;
951 char *endptr; 954 char *endptr;
952 bool period_type_set = false; 955 bool period_type_set = false;
956 bool period_set = false;
953 957
954 synth_opts->set = true; 958 synth_opts->set = true;
955 959
@@ -971,6 +975,7 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
971 p += 1; 975 p += 1;
972 if (isdigit(*p)) { 976 if (isdigit(*p)) {
973 synth_opts->period = strtoull(p, &endptr, 10); 977 synth_opts->period = strtoull(p, &endptr, 10);
978 period_set = true;
974 p = endptr; 979 p = endptr;
975 while (*p == ' ' || *p == ',') 980 while (*p == ' ' || *p == ',')
976 p += 1; 981 p += 1;
@@ -1041,6 +1046,23 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
1041 synth_opts->callchain_sz = val; 1046 synth_opts->callchain_sz = val;
1042 } 1047 }
1043 break; 1048 break;
1049 case 'l':
1050 synth_opts->last_branch = true;
1051 synth_opts->last_branch_sz =
1052 PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1053 while (*p == ' ' || *p == ',')
1054 p += 1;
1055 if (isdigit(*p)) {
1056 unsigned int val;
1057
1058 val = strtoul(p, &endptr, 10);
1059 p = endptr;
1060 if (!val ||
1061 val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
1062 goto out_err;
1063 synth_opts->last_branch_sz = val;
1064 }
1065 break;
1044 case ' ': 1066 case ' ':
1045 case ',': 1067 case ',':
1046 break; 1068 break;
@@ -1053,7 +1075,7 @@ out:
1053 if (!period_type_set) 1075 if (!period_type_set)
1054 synth_opts->period_type = 1076 synth_opts->period_type =
1055 PERF_ITRACE_DEFAULT_PERIOD_TYPE; 1077 PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1056 if (!synth_opts->period) 1078 if (!period_set)
1057 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; 1079 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1058 } 1080 }
1059 1081
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index bf72b77a588a..b86f90db1352 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -63,7 +63,9 @@ enum itrace_period_type {
63 * @calls: limit branch samples to calls (can be combined with @returns) 63 * @calls: limit branch samples to calls (can be combined with @returns)
64 * @returns: limit branch samples to returns (can be combined with @calls) 64 * @returns: limit branch samples to returns (can be combined with @calls)
65 * @callchain: add callchain to 'instructions' events 65 * @callchain: add callchain to 'instructions' events
 66 * @last_branch: add branch context to 'instructions' events
66 * @callchain_sz: maximum callchain size 67 * @callchain_sz: maximum callchain size
68 * @last_branch_sz: branch context size
67 * @period: 'instructions' events period 69 * @period: 'instructions' events period
68 * @period_type: 'instructions' events period type 70 * @period_type: 'instructions' events period type
69 */ 71 */
@@ -79,7 +81,9 @@ struct itrace_synth_opts {
79 bool calls; 81 bool calls;
80 bool returns; 82 bool returns;
81 bool callchain; 83 bool callchain;
84 bool last_branch;
82 unsigned int callchain_sz; 85 unsigned int callchain_sz;
86 unsigned int last_branch_sz;
83 unsigned long long period; 87 unsigned long long period;
84 enum itrace_period_type period_type; 88 enum itrace_period_type period_type;
85}; 89};
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
new file mode 100644
index 000000000000..ba6f7526b282
--- /dev/null
+++ b/tools/perf/util/bpf-loader.c
@@ -0,0 +1,352 @@
1/*
2 * bpf-loader.c
3 *
4 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
5 * Copyright (C) 2015 Huawei Inc.
6 */
7
8#include <bpf/libbpf.h>
9#include <linux/err.h>
10#include "perf.h"
11#include "debug.h"
12#include "bpf-loader.h"
13#include "probe-event.h"
14#include "probe-finder.h" // for MAX_PROBES
15#include "llvm-utils.h"
16
17#define DEFINE_PRINT_FN(name, level) \
18static int libbpf_##name(const char *fmt, ...) \
19{ \
20 va_list args; \
21 int ret; \
22 \
23 va_start(args, fmt); \
24 ret = veprintf(level, verbose, pr_fmt(fmt), args);\
25 va_end(args); \
26 return ret; \
27}
28
29DEFINE_PRINT_FN(warning, 0)
30DEFINE_PRINT_FN(info, 0)
31DEFINE_PRINT_FN(debug, 1)
32
33struct bpf_prog_priv {
34 struct perf_probe_event pev;
35};
36
37struct bpf_object *bpf__prepare_load(const char *filename, bool source)
38{
39 struct bpf_object *obj;
40 static bool libbpf_initialized;
41
42 if (!libbpf_initialized) {
43 libbpf_set_print(libbpf_warning,
44 libbpf_info,
45 libbpf_debug);
46 libbpf_initialized = true;
47 }
48
49 if (source) {
50 int err;
51 void *obj_buf;
52 size_t obj_buf_sz;
53
54 err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
55 if (err)
56 return ERR_PTR(err);
57 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
58 free(obj_buf);
59 } else
60 obj = bpf_object__open(filename);
61
62 if (!obj) {
63 pr_debug("bpf: failed to load %s\n", filename);
64 return ERR_PTR(-EINVAL);
65 }
66
67 return obj;
68}
69
70void bpf__clear(void)
71{
72 struct bpf_object *obj, *tmp;
73
74 bpf_object__for_each_safe(obj, tmp) {
75 bpf__unprobe(obj);
76 bpf_object__close(obj);
77 }
78}
79
80static void
81bpf_prog_priv__clear(struct bpf_program *prog __maybe_unused,
82 void *_priv)
83{
84 struct bpf_prog_priv *priv = _priv;
85
86 cleanup_perf_probe_events(&priv->pev, 1);
87 free(priv);
88}
89
90static int
91config_bpf_program(struct bpf_program *prog)
92{
93 struct perf_probe_event *pev = NULL;
94 struct bpf_prog_priv *priv = NULL;
95 const char *config_str;
96 int err;
97
98 config_str = bpf_program__title(prog, false);
99 if (!config_str) {
100 pr_debug("bpf: unable to get title for program\n");
101 return -EINVAL;
102 }
103
104 priv = calloc(sizeof(*priv), 1);
105 if (!priv) {
106 pr_debug("bpf: failed to alloc priv\n");
107 return -ENOMEM;
108 }
109 pev = &priv->pev;
110
111 pr_debug("bpf: config program '%s'\n", config_str);
112 err = parse_perf_probe_command(config_str, pev);
113 if (err < 0) {
114 pr_debug("bpf: '%s' is not a valid config string\n",
115 config_str);
116 err = -EINVAL;
117 goto errout;
118 }
119
120 if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
121 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
122 config_str, PERF_BPF_PROBE_GROUP);
123 err = -EINVAL;
124 goto errout;
125 } else if (!pev->group)
126 pev->group = strdup(PERF_BPF_PROBE_GROUP);
127
128 if (!pev->group) {
129 pr_debug("bpf: strdup failed\n");
130 err = -ENOMEM;
131 goto errout;
132 }
133
134 if (!pev->event) {
135 pr_debug("bpf: '%s': event name is missing\n",
136 config_str);
137 err = -EINVAL;
138 goto errout;
139 }
140 pr_debug("bpf: config '%s' is ok\n", config_str);
141
142 err = bpf_program__set_private(prog, priv, bpf_prog_priv__clear);
143 if (err) {
144 pr_debug("Failed to set priv for program '%s'\n", config_str);
145 goto errout;
146 }
147
148 return 0;
149
150errout:
151 if (pev)
152 clear_perf_probe_event(pev);
153 free(priv);
154 return err;
155}
156
157static int bpf__prepare_probe(void)
158{
159 static int err = 0;
160 static bool initialized = false;
161
162 /*
 163 * Make err static, so that if init failed the first time,
 164 * bpf__prepare_probe() fails each time without calling
 165 * init_probe_symbol_maps multiple times.
166 */
167 if (initialized)
168 return err;
169
170 initialized = true;
171 err = init_probe_symbol_maps(false);
172 if (err < 0)
173 pr_debug("Failed to init_probe_symbol_maps\n");
174 probe_conf.max_probes = MAX_PROBES;
175 return err;
176}
177
178int bpf__probe(struct bpf_object *obj)
179{
180 int err = 0;
181 struct bpf_program *prog;
182 struct bpf_prog_priv *priv;
183 struct perf_probe_event *pev;
184
185 err = bpf__prepare_probe();
186 if (err) {
187 pr_debug("bpf__prepare_probe failed\n");
188 return err;
189 }
190
191 bpf_object__for_each_program(prog, obj) {
192 err = config_bpf_program(prog);
193 if (err)
194 goto out;
195
196 err = bpf_program__get_private(prog, (void **)&priv);
197 if (err || !priv)
198 goto out;
199 pev = &priv->pev;
200
201 err = convert_perf_probe_events(pev, 1);
202 if (err < 0) {
203 pr_debug("bpf_probe: failed to convert perf probe events");
204 goto out;
205 }
206
207 err = apply_perf_probe_events(pev, 1);
208 if (err < 0) {
209 pr_debug("bpf_probe: failed to apply perf probe events");
210 goto out;
211 }
212 }
213out:
214 return err < 0 ? err : 0;
215}
216
217#define EVENTS_WRITE_BUFSIZE 4096
218int bpf__unprobe(struct bpf_object *obj)
219{
220 int err, ret = 0;
221 struct bpf_program *prog;
222 struct bpf_prog_priv *priv;
223
224 bpf_object__for_each_program(prog, obj) {
225 int i;
226
227 err = bpf_program__get_private(prog, (void **)&priv);
228 if (err || !priv)
229 continue;
230
231 for (i = 0; i < priv->pev.ntevs; i++) {
232 struct probe_trace_event *tev = &priv->pev.tevs[i];
233 char name_buf[EVENTS_WRITE_BUFSIZE];
234 struct strfilter *delfilter;
235
236 snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
237 "%s:%s", tev->group, tev->event);
238 name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
239
240 delfilter = strfilter__new(name_buf, NULL);
241 if (!delfilter) {
242 pr_debug("Failed to create filter for unprobing\n");
243 ret = -ENOMEM;
244 continue;
245 }
246
247 err = del_perf_probe_events(delfilter);
248 strfilter__delete(delfilter);
249 if (err) {
250 pr_debug("Failed to delete %s\n", name_buf);
251 ret = err;
252 continue;
253 }
254 }
255 }
256 return ret;
257}
258
259int bpf__load(struct bpf_object *obj)
260{
261 int err;
262
263 err = bpf_object__load(obj);
264 if (err) {
265 pr_debug("bpf: load objects failed\n");
266 return err;
267 }
268 return 0;
269}
270
271int bpf__foreach_tev(struct bpf_object *obj,
272 bpf_prog_iter_callback_t func,
273 void *arg)
274{
275 struct bpf_program *prog;
276 int err;
277
278 bpf_object__for_each_program(prog, obj) {
279 struct probe_trace_event *tev;
280 struct perf_probe_event *pev;
281 struct bpf_prog_priv *priv;
282 int i, fd;
283
284 err = bpf_program__get_private(prog,
285 (void **)&priv);
286 if (err || !priv) {
287 pr_debug("bpf: failed to get private field\n");
288 return -EINVAL;
289 }
290
291 pev = &priv->pev;
292 for (i = 0; i < pev->ntevs; i++) {
293 tev = &pev->tevs[i];
294
295 fd = bpf_program__fd(prog);
296 if (fd < 0) {
297 pr_debug("bpf: failed to get file descriptor\n");
298 return fd;
299 }
300
301 err = (*func)(tev, fd, arg);
302 if (err) {
303 pr_debug("bpf: call back failed, stop iterate\n");
304 return err;
305 }
306 }
307 }
308 return 0;
309}
310
311#define bpf__strerror_head(err, buf, size) \
312 char sbuf[STRERR_BUFSIZE], *emsg;\
313 if (!size)\
314 return 0;\
315 if (err < 0)\
316 err = -err;\
317 emsg = strerror_r(err, sbuf, sizeof(sbuf));\
318 switch (err) {\
319 default:\
320 scnprintf(buf, size, "%s", emsg);\
321 break;
322
323#define bpf__strerror_entry(val, fmt...)\
324 case val: {\
325 scnprintf(buf, size, fmt);\
326 break;\
327 }
328
329#define bpf__strerror_end(buf, size)\
330 }\
331 buf[size - 1] = '\0';
332
333int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
334 int err, char *buf, size_t size)
335{
336 bpf__strerror_head(err, buf, size);
 337 bpf__strerror_entry(EEXIST, "Probe point exists. Try using 'perf probe -d \"*\"'");
338 bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0\n");
339 bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file\n");
340 bpf__strerror_end(buf, size);
341 return 0;
342}
343
344int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
345 int err, char *buf, size_t size)
346{
347 bpf__strerror_head(err, buf, size);
 348 bpf__strerror_entry(EINVAL, "%s: Are you root and running a CONFIG_BPF_SYSCALL kernel?",
349 emsg)
350 bpf__strerror_end(buf, size);
351 return 0;
352}
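The bpf__strerror_head/_entry/_end macros above open a switch on the (made positive) errno value, add one case per errno with a tailored message, fall back to the strerror_r() text for everything else, and force NUL termination of the buffer. A standalone sketch of the same pattern; it uses plain strerror() instead of strerror_r() to sidestep the GNU/POSIX variant difference, and the my_* names and messages are illustrative:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define my_strerror_head(err, buf, size)                \
        const char *emsg;                               \
        if (!(size))                                    \
                return 0;                               \
        if ((err) < 0)                                  \
                (err) = -(err);                         \
        emsg = strerror(err);                           \
        switch (err) {                                  \
        default:                                        \
                snprintf(buf, size, "%s", emsg);        \
                break;

#define my_strerror_entry(val, ...)                     \
        case val:                                       \
                snprintf(buf, size, __VA_ARGS__);       \
                break;

#define my_strerror_end(buf, size)                      \
        }                                               \
        (buf)[(size) - 1] = '\0';

static int describe_error(int err, char *buf, size_t size)
{
        my_strerror_head(err, buf, size);
        my_strerror_entry(EEXIST, "object already exists, remove it first");
        my_strerror_entry(EPERM, "insufficient privileges");
        my_strerror_end(buf, size);
        return 0;
}

int main(void)
{
        char buf[64];

        describe_error(-EPERM, buf, sizeof(buf));
        puts(buf);
        return 0;
}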
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
new file mode 100644
index 000000000000..ccd8d7fd79d3
--- /dev/null
+++ b/tools/perf/util/bpf-loader.h
@@ -0,0 +1,85 @@
1/*
2 * Copyright (C) 2015, Wang Nan <wangnan0@huawei.com>
3 * Copyright (C) 2015, Huawei Inc.
4 */
5#ifndef __BPF_LOADER_H
6#define __BPF_LOADER_H
7
8#include <linux/compiler.h>
9#include <linux/err.h>
10#include <string.h>
11#include "probe-event.h"
12#include "debug.h"
13
14struct bpf_object;
15#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
16
17typedef int (*bpf_prog_iter_callback_t)(struct probe_trace_event *tev,
18 int fd, void *arg);
19
20#ifdef HAVE_LIBBPF_SUPPORT
21struct bpf_object *bpf__prepare_load(const char *filename, bool source);
22
23void bpf__clear(void);
24
25int bpf__probe(struct bpf_object *obj);
26int bpf__unprobe(struct bpf_object *obj);
27int bpf__strerror_probe(struct bpf_object *obj, int err,
28 char *buf, size_t size);
29
30int bpf__load(struct bpf_object *obj);
31int bpf__strerror_load(struct bpf_object *obj, int err,
32 char *buf, size_t size);
33int bpf__foreach_tev(struct bpf_object *obj,
34 bpf_prog_iter_callback_t func, void *arg);
35#else
36static inline struct bpf_object *
37bpf__prepare_load(const char *filename __maybe_unused,
38 bool source __maybe_unused)
39{
40 pr_debug("ERROR: eBPF object loading is disabled during compiling.\n");
41 return ERR_PTR(-ENOTSUP);
42}
43
44static inline void bpf__clear(void) { }
45
46static inline int bpf__probe(struct bpf_object *obj __maybe_unused) { return 0;}
47static inline int bpf__unprobe(struct bpf_object *obj __maybe_unused) { return 0;}
48static inline int bpf__load(struct bpf_object *obj __maybe_unused) { return 0; }
49
50static inline int
51bpf__foreach_tev(struct bpf_object *obj __maybe_unused,
52 bpf_prog_iter_callback_t func __maybe_unused,
53 void *arg __maybe_unused)
54{
55 return 0;
56}
57
58static inline int
59__bpf_strerror(char *buf, size_t size)
60{
61 if (!size)
62 return 0;
63 strncpy(buf,
64 "ERROR: eBPF object loading is disabled during compiling.\n",
65 size);
66 buf[size - 1] = '\0';
67 return 0;
68}
69
70static inline int
71bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
72 int err __maybe_unused,
73 char *buf, size_t size)
74{
75 return __bpf_strerror(buf, size);
76}
77
78static inline int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
79 int err __maybe_unused,
80 char *buf, size_t size)
81{
82 return __bpf_strerror(buf, size);
83}
84#endif
85#endif
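bpf-loader.h uses the usual tools/perf pattern for optional dependencies: when HAVE_LIBBPF_SUPPORT is not defined, every entry point is replaced by a static inline stub that is either a no-op or reports a fixed error, so callers never need #ifdefs of their own. A tiny sketch of that stub pattern, built around a made-up HAVE_FOO_SUPPORT feature:

#include <errno.h>
#include <stdio.h>

/* #define HAVE_FOO_SUPPORT  -- normally set by the build system when
 *                              the optional library is available */

#ifdef HAVE_FOO_SUPPORT
int foo_load(const char *path);         /* real implementation elsewhere */
#else
static inline int foo_load(const char *path)
{
        (void)path;
        fprintf(stderr, "foo support was disabled at compile time\n");
        return -ENOTSUP;
}
#endif

int main(void)
{
        return foo_load("object.o") ? 1 : 0;
}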
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 773fe13ce627..735ad48e1858 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -51,10 +51,12 @@ static int parse_callchain_order(const char *value)
51{ 51{
52 if (!strncmp(value, "caller", strlen(value))) { 52 if (!strncmp(value, "caller", strlen(value))) {
53 callchain_param.order = ORDER_CALLER; 53 callchain_param.order = ORDER_CALLER;
54 callchain_param.order_set = true;
54 return 0; 55 return 0;
55 } 56 }
56 if (!strncmp(value, "callee", strlen(value))) { 57 if (!strncmp(value, "callee", strlen(value))) {
57 callchain_param.order = ORDER_CALLEE; 58 callchain_param.order = ORDER_CALLEE;
59 callchain_param.order_set = true;
58 return 0; 60 return 0;
59 } 61 }
60 return -1; 62 return -1;
@@ -77,12 +79,14 @@ static int parse_callchain_sort_key(const char *value)
77 return -1; 79 return -1;
78} 80}
79 81
80int 82static int
81parse_callchain_report_opt(const char *arg) 83__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
82{ 84{
83 char *tok; 85 char *tok;
84 char *endptr; 86 char *endptr;
85 bool minpcnt_set = false; 87 bool minpcnt_set = false;
88 bool record_opt_set = false;
89 bool try_stack_size = false;
86 90
87 symbol_conf.use_callchain = true; 91 symbol_conf.use_callchain = true;
88 92
@@ -100,6 +104,28 @@ parse_callchain_report_opt(const char *arg)
100 !parse_callchain_order(tok) || 104 !parse_callchain_order(tok) ||
101 !parse_callchain_sort_key(tok)) { 105 !parse_callchain_sort_key(tok)) {
102 /* parsing ok - move on to the next */ 106 /* parsing ok - move on to the next */
107 try_stack_size = false;
108 goto next;
109 } else if (allow_record_opt && !record_opt_set) {
110 if (parse_callchain_record(tok, &callchain_param))
111 goto try_numbers;
112
113 /* assume that number followed by 'dwarf' is stack size */
114 if (callchain_param.record_mode == CALLCHAIN_DWARF)
115 try_stack_size = true;
116
117 record_opt_set = true;
118 goto next;
119 }
120
121try_numbers:
122 if (try_stack_size) {
123 unsigned long size = 0;
124
125 if (get_stack_size(tok, &size) < 0)
126 return -1;
127 callchain_param.dump_size = size;
128 try_stack_size = false;
103 } else if (!minpcnt_set) { 129 } else if (!minpcnt_set) {
104 /* try to get the min percent */ 130 /* try to get the min percent */
105 callchain_param.min_percent = strtod(tok, &endptr); 131 callchain_param.min_percent = strtod(tok, &endptr);
@@ -112,7 +138,7 @@ parse_callchain_report_opt(const char *arg)
112 if (tok == endptr) 138 if (tok == endptr)
113 return -1; 139 return -1;
114 } 140 }
115 141next:
116 arg = NULL; 142 arg = NULL;
117 } 143 }
118 144
@@ -123,6 +149,16 @@ parse_callchain_report_opt(const char *arg)
123 return 0; 149 return 0;
124} 150}
125 151
152int parse_callchain_report_opt(const char *arg)
153{
154 return __parse_callchain_report_opt(arg, false);
155}
156
157int parse_callchain_top_opt(const char *arg)
158{
159 return __parse_callchain_report_opt(arg, true);
160}
161
126int perf_callchain_config(const char *var, const char *value) 162int perf_callchain_config(const char *var, const char *value)
127{ 163{
128 char *endptr; 164 char *endptr;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index acee2b3cd801..fce8161e54db 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -7,6 +7,30 @@
7#include "event.h" 7#include "event.h"
8#include "symbol.h" 8#include "symbol.h"
9 9
10#define HELP_PAD "\t\t\t\t"
11
 12#define CALLCHAIN_HELP "sets up and enables call-graph (stack chain/backtrace):\n\n"
13
14#ifdef HAVE_DWARF_UNWIND_SUPPORT
15# define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n"
16#else
17# define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n"
18#endif
19
20#define RECORD_SIZE_HELP \
21 HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \
22 HELP_PAD "\t\tdefault: 8192 (bytes)\n"
23
24#define CALLCHAIN_RECORD_HELP CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP
25
26#define CALLCHAIN_REPORT_HELP \
27 HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|none)\n" \
28 HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \
29 HELP_PAD "print_limit:\tmaximum number of call graph entry (<number>)\n" \
30 HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \
31 HELP_PAD "sort_key:\tcall graph sort key (function|address)\n" \
32 HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n"
33
10enum perf_call_graph_mode { 34enum perf_call_graph_mode {
11 CALLCHAIN_NONE, 35 CALLCHAIN_NONE,
12 CALLCHAIN_FP, 36 CALLCHAIN_FP,
@@ -63,6 +87,7 @@ struct callchain_param {
63 double min_percent; 87 double min_percent;
64 sort_chain_func_t sort; 88 sort_chain_func_t sort;
65 enum chain_order order; 89 enum chain_order order;
90 bool order_set;
66 enum chain_key key; 91 enum chain_key key;
67 bool branch_callstack; 92 bool branch_callstack;
68}; 93};
@@ -180,6 +205,7 @@ extern const char record_callchain_help[];
180extern int parse_callchain_record(const char *arg, struct callchain_param *param); 205extern int parse_callchain_record(const char *arg, struct callchain_param *param);
181int parse_callchain_record_opt(const char *arg, struct callchain_param *param); 206int parse_callchain_record_opt(const char *arg, struct callchain_param *param);
182int parse_callchain_report_opt(const char *arg); 207int parse_callchain_report_opt(const char *arg);
208int parse_callchain_top_opt(const char *arg);
183int perf_callchain_config(const char *var, const char *value); 209int perf_callchain_config(const char *var, const char *value);
184 210
185static inline void callchain_cursor_snapshot(struct callchain_cursor *dest, 211static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 3667e2123e5b..10af1e7524fb 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -203,6 +203,23 @@ struct cpu_map *cpu_map__dummy_new(void)
203 return cpus; 203 return cpus;
204} 204}
205 205
206struct cpu_map *cpu_map__empty_new(int nr)
207{
208 struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
209
210 if (cpus != NULL) {
211 int i;
212
213 cpus->nr = nr;
214 for (i = 0; i < nr; i++)
215 cpus->map[i] = -1;
216
217 atomic_set(&cpus->refcnt, 1);
218 }
219
220 return cpus;
221}
222
206static void cpu_map__delete(struct cpu_map *map) 223static void cpu_map__delete(struct cpu_map *map)
207{ 224{
208 if (map) { 225 if (map) {
@@ -225,32 +242,32 @@ void cpu_map__put(struct cpu_map *map)
225 cpu_map__delete(map); 242 cpu_map__delete(map);
226} 243}
227 244
228int cpu_map__get_socket(struct cpu_map *map, int idx) 245static int cpu__get_topology_int(int cpu, const char *name, int *value)
229{ 246{
230 FILE *fp;
231 const char *mnt;
232 char path[PATH_MAX]; 247 char path[PATH_MAX];
233 int cpu, ret;
234 248
235 if (idx > map->nr) 249 snprintf(path, PATH_MAX,
236 return -1; 250 "devices/system/cpu/cpu%d/topology/%s", cpu, name);
237 251
238 cpu = map->map[idx]; 252 return sysfs__read_int(path, value);
253}
239 254
240 mnt = sysfs__mountpoint(); 255int cpu_map__get_socket_id(int cpu)
241 if (!mnt) 256{
242 return -1; 257 int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
258 return ret ?: value;
259}
243 260
244 snprintf(path, PATH_MAX, 261int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
245 "%s/devices/system/cpu/cpu%d/topology/physical_package_id", 262{
246 mnt, cpu); 263 int cpu;
247 264
248 fp = fopen(path, "r"); 265 if (idx > map->nr)
249 if (!fp)
250 return -1; 266 return -1;
251 ret = fscanf(fp, "%d", &cpu); 267
252 fclose(fp); 268 cpu = map->map[idx];
253 return ret == 1 ? cpu : -1; 269
270 return cpu_map__get_socket_id(cpu);
254} 271}
255 272
256static int cmp_ids(const void *a, const void *b) 273static int cmp_ids(const void *a, const void *b)
@@ -258,8 +275,9 @@ static int cmp_ids(const void *a, const void *b)
258 return *(int *)a - *(int *)b; 275 return *(int *)a - *(int *)b;
259} 276}
260 277
261static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, 278int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
262 int (*f)(struct cpu_map *map, int cpu)) 279 int (*f)(struct cpu_map *map, int cpu, void *data),
280 void *data)
263{ 281{
264 struct cpu_map *c; 282 struct cpu_map *c;
265 int nr = cpus->nr; 283 int nr = cpus->nr;
@@ -271,7 +289,7 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
271 return -1; 289 return -1;
272 290
273 for (cpu = 0; cpu < nr; cpu++) { 291 for (cpu = 0; cpu < nr; cpu++) {
274 s1 = f(cpus, cpu); 292 s1 = f(cpus, cpu, data);
275 for (s2 = 0; s2 < c->nr; s2++) { 293 for (s2 = 0; s2 < c->nr; s2++) {
276 if (s1 == c->map[s2]) 294 if (s1 == c->map[s2])
277 break; 295 break;
@@ -284,40 +302,29 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
284 /* ensure we process id in increasing order */ 302 /* ensure we process id in increasing order */
285 qsort(c->map, c->nr, sizeof(int), cmp_ids); 303 qsort(c->map, c->nr, sizeof(int), cmp_ids);
286 304
287 atomic_set(&cpus->refcnt, 1); 305 atomic_set(&c->refcnt, 1);
288 *res = c; 306 *res = c;
289 return 0; 307 return 0;
290} 308}
291 309
292int cpu_map__get_core(struct cpu_map *map, int idx) 310int cpu_map__get_core_id(int cpu)
293{ 311{
294 FILE *fp; 312 int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
295 const char *mnt; 313 return ret ?: value;
296 char path[PATH_MAX]; 314}
297 int cpu, ret, s; 315
316int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
317{
318 int cpu, s;
298 319
299 if (idx > map->nr) 320 if (idx > map->nr)
300 return -1; 321 return -1;
301 322
302 cpu = map->map[idx]; 323 cpu = map->map[idx];
303 324
304 mnt = sysfs__mountpoint(); 325 cpu = cpu_map__get_core_id(cpu);
305 if (!mnt)
306 return -1;
307
308 snprintf(path, PATH_MAX,
309 "%s/devices/system/cpu/cpu%d/topology/core_id",
310 mnt, cpu);
311
312 fp = fopen(path, "r");
313 if (!fp)
314 return -1;
315 ret = fscanf(fp, "%d", &cpu);
316 fclose(fp);
317 if (ret != 1)
318 return -1;
319 326
320 s = cpu_map__get_socket(map, idx); 327 s = cpu_map__get_socket(map, idx, data);
321 if (s == -1) 328 if (s == -1)
322 return -1; 329 return -1;
323 330
@@ -332,12 +339,12 @@ int cpu_map__get_core(struct cpu_map *map, int idx)
332 339
333int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp) 340int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
334{ 341{
335 return cpu_map__build_map(cpus, sockp, cpu_map__get_socket); 342 return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
336} 343}
337 344
338int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep) 345int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
339{ 346{
340 return cpu_map__build_map(cpus, corep, cpu_map__get_core); 347 return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
341} 348}
342 349
343/* setup simple routines to easily access node numbers given a cpu number */ 350/* setup simple routines to easily access node numbers given a cpu number */
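The cpumap.c rework above folds the duplicated fopen/fscanf code into cpu__get_topology_int(), which delegates to the sysfs__read_int() helper from tools/lib/api (not part of this patch). A standalone sketch of what that read boils down to, using plain stdio and assuming sysfs is mounted at /sys:

#include <stdio.h>

/* Read one integer, e.g. the physical package id, for a given CPU. */
static int read_topology_int(int cpu, const char *name, int *value)
{
        char path[256];
        FILE *fp;
        int ret;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, name);

        fp = fopen(path, "r");
        if (!fp)
                return -1;
        ret = fscanf(fp, "%d", value);
        fclose(fp);
        return ret == 1 ? 0 : -1;
}

int main(void)
{
        int socket_id;

        if (!read_topology_int(0, "physical_package_id", &socket_id))
                printf("cpu0 is on socket %d\n", socket_id);
        return 0;
}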
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 0af9cecb4c51..85f7772457fa 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -15,11 +15,14 @@ struct cpu_map {
15}; 15};
16 16
17struct cpu_map *cpu_map__new(const char *cpu_list); 17struct cpu_map *cpu_map__new(const char *cpu_list);
18struct cpu_map *cpu_map__empty_new(int nr);
18struct cpu_map *cpu_map__dummy_new(void); 19struct cpu_map *cpu_map__dummy_new(void);
19struct cpu_map *cpu_map__read(FILE *file); 20struct cpu_map *cpu_map__read(FILE *file);
20size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); 21size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
21int cpu_map__get_socket(struct cpu_map *map, int idx); 22int cpu_map__get_socket_id(int cpu);
22int cpu_map__get_core(struct cpu_map *map, int idx); 23int cpu_map__get_socket(struct cpu_map *map, int idx, void *data);
24int cpu_map__get_core_id(int cpu);
25int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
23int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp); 26int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
24int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep); 27int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
25 28
@@ -85,4 +88,7 @@ static inline int cpu__get_node(int cpu)
85 return cpunode_map[cpu]; 88 return cpunode_map[cpu];
86} 89}
87 90
91int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
92 int (*f)(struct cpu_map *map, int cpu, void *data),
93 void *data);
88#endif /* __PERF_CPUMAP_H */ 94#endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
new file mode 100644
index 000000000000..6af4f7c36820
--- /dev/null
+++ b/tools/perf/util/env.c
@@ -0,0 +1,86 @@
1#include "cpumap.h"
2#include "env.h"
3#include "util.h"
4
5struct perf_env perf_env;
6
7void perf_env__exit(struct perf_env *env)
8{
9 zfree(&env->hostname);
10 zfree(&env->os_release);
11 zfree(&env->version);
12 zfree(&env->arch);
13 zfree(&env->cpu_desc);
14 zfree(&env->cpuid);
15 zfree(&env->cmdline);
16 zfree(&env->cmdline_argv);
17 zfree(&env->sibling_cores);
18 zfree(&env->sibling_threads);
19 zfree(&env->numa_nodes);
20 zfree(&env->pmu_mappings);
21 zfree(&env->cpu);
22}
23
24int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
25{
26 int i;
27
28 /*
29 * If env->cmdline_argv has already been set, do not override it. This allows
30 * a command to set the cmdline, parse args and then call another
 31 * builtin function that implements a command -- e.g., cmd_kvm calling
32 * cmd_record.
33 */
34 if (env->cmdline_argv != NULL)
35 return 0;
36
37 /* do not include NULL termination */
38 env->cmdline_argv = calloc(argc, sizeof(char *));
39 if (env->cmdline_argv == NULL)
40 goto out_enomem;
41
42 /*
43 * Must copy argv contents because it gets moved around during option
44 * parsing:
45 */
46 for (i = 0; i < argc ; i++) {
47 env->cmdline_argv[i] = argv[i];
48 if (env->cmdline_argv[i] == NULL)
49 goto out_free;
50 }
51
52 env->nr_cmdline = argc;
53
54 return 0;
55out_free:
56 zfree(&env->cmdline_argv);
57out_enomem:
58 return -ENOMEM;
59}
60
61int perf_env__read_cpu_topology_map(struct perf_env *env)
62{
63 int cpu, nr_cpus;
64
65 if (env->cpu != NULL)
66 return 0;
67
68 if (env->nr_cpus_avail == 0)
69 env->nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF);
70
71 nr_cpus = env->nr_cpus_avail;
72 if (nr_cpus == -1)
73 return -EINVAL;
74
75 env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
76 if (env->cpu == NULL)
77 return -ENOMEM;
78
79 for (cpu = 0; cpu < nr_cpus; ++cpu) {
80 env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
81 env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
82 }
83
84 env->nr_cpus_avail = nr_cpus;
85 return 0;
86}
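perf_env__read_cpu_topology_map() above builds, once, an array holding one {socket_id, core_id} pair per possible CPU; the event.c hunk further down then turns a sample's CPU number into al->socket with a single array lookup. A self-contained sketch of that lazily built table, where fake_socket_id()/fake_core_id() stand in for the real sysfs-backed helpers:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct cpu_topology_entry {
        int socket_id;
        int core_id;
};

static struct cpu_topology_entry *cpu_table;
static int nr_cpus;

/* Placeholders for cpu_map__get_socket_id()/cpu_map__get_core_id(). */
static int fake_socket_id(int cpu) { return cpu / 4; }
static int fake_core_id(int cpu)   { return cpu % 4; }

static int topology_map_init(void)
{
        int cpu;

        if (cpu_table)
                return 0;               /* built once, reused afterwards */

        nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        if (nr_cpus <= 0)
                return -1;

        cpu_table = calloc(nr_cpus, sizeof(*cpu_table));
        if (!cpu_table)
                return -1;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                cpu_table[cpu].socket_id = fake_socket_id(cpu);
                cpu_table[cpu].core_id = fake_core_id(cpu);
        }
        return 0;
}

/* Map a sample's CPU number to its socket, -1 when unknown. */
static int sample_socket(int sample_cpu)
{
        if (sample_cpu < 0 || topology_map_init() || sample_cpu >= nr_cpus)
                return -1;
        return cpu_table[sample_cpu].socket_id;
}

int main(void)
{
        printf("cpu 1 -> socket %d\n", sample_socket(1));
        return 0;
}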
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
new file mode 100644
index 000000000000..0132b9557c02
--- /dev/null
+++ b/tools/perf/util/env.h
@@ -0,0 +1,44 @@
1#ifndef __PERF_ENV_H
2#define __PERF_ENV_H
3
4struct cpu_topology_map {
5 int socket_id;
6 int core_id;
7};
8
9struct perf_env {
10 char *hostname;
11 char *os_release;
12 char *version;
13 char *arch;
14 int nr_cpus_online;
15 int nr_cpus_avail;
16 char *cpu_desc;
17 char *cpuid;
18 unsigned long long total_mem;
19 unsigned int msr_pmu_type;
20
21 int nr_cmdline;
22 int nr_sibling_cores;
23 int nr_sibling_threads;
24 int nr_numa_nodes;
25 int nr_pmu_mappings;
26 int nr_groups;
27 char *cmdline;
28 const char **cmdline_argv;
29 char *sibling_cores;
30 char *sibling_threads;
31 char *numa_nodes;
32 char *pmu_mappings;
33 struct cpu_topology_map *cpu;
34};
35
36extern struct perf_env perf_env;
37
38void perf_env__exit(struct perf_env *env);
39
40int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
41
42int perf_env__read_cpu_topology_map(struct perf_env *env);
43
44#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 7ff61274ed57..8b10621b415c 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -67,7 +67,8 @@ static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
67 char filename[PATH_MAX]; 67 char filename[PATH_MAX];
68 char bf[4096]; 68 char bf[4096];
69 int fd; 69 int fd;
70 size_t size = 0, n; 70 size_t size = 0;
71 ssize_t n;
71 char *nl, *name, *tgids, *ppids; 72 char *nl, *name, *tgids, *ppids;
72 73
73 *tgid = -1; 74 *tgid = -1;
@@ -167,7 +168,7 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
167 return 0; 168 return 0;
168} 169}
169 170
170static pid_t perf_event__synthesize_comm(struct perf_tool *tool, 171pid_t perf_event__synthesize_comm(struct perf_tool *tool,
171 union perf_event *event, pid_t pid, 172 union perf_event *event, pid_t pid,
172 perf_event__handler_t process, 173 perf_event__handler_t process,
173 struct machine *machine) 174 struct machine *machine)
@@ -378,7 +379,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
378 for (pos = maps__first(maps); pos; pos = map__next(pos)) { 379 for (pos = maps__first(maps); pos; pos = map__next(pos)) {
379 size_t size; 380 size_t size;
380 381
381 if (pos->dso->kernel) 382 if (__map__is_kernel(pos))
382 continue; 383 continue;
383 384
384 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 385 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
@@ -649,12 +650,12 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
649 size_t size; 650 size_t size;
650 const char *mmap_name; 651 const char *mmap_name;
651 char name_buff[PATH_MAX]; 652 char name_buff[PATH_MAX];
652 struct map *map; 653 struct map *map = machine__kernel_map(machine);
653 struct kmap *kmap; 654 struct kmap *kmap;
654 int err; 655 int err;
655 union perf_event *event; 656 union perf_event *event;
656 657
657 if (machine->vmlinux_maps[0] == NULL) 658 if (map == NULL)
658 return -1; 659 return -1;
659 660
660 /* 661 /*
@@ -680,7 +681,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
680 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 681 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
681 } 682 }
682 683
683 map = machine->vmlinux_maps[MAP__FUNCTION];
684 kmap = map__kmap(map); 684 kmap = map__kmap(map);
685 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 685 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
686 "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; 686 "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
@@ -1008,7 +1008,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
1008 * it now. 1008 * it now.
1009 */ 1009 */
1010 if (cpumode == PERF_RECORD_MISC_KERNEL && 1010 if (cpumode == PERF_RECORD_MISC_KERNEL &&
1011 machine->vmlinux_maps[MAP__FUNCTION] == NULL) 1011 machine__kernel_map(machine) == NULL)
1012 machine__create_kernel_maps(machine); 1012 machine__create_kernel_maps(machine);
1013 1013
1014 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al); 1014 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
@@ -1021,6 +1021,14 @@ int perf_event__preprocess_sample(const union perf_event *event,
1021 1021
1022 al->sym = NULL; 1022 al->sym = NULL;
1023 al->cpu = sample->cpu; 1023 al->cpu = sample->cpu;
1024 al->socket = -1;
1025
1026 if (al->cpu >= 0) {
1027 struct perf_env *env = machine->env;
1028
1029 if (env && env->cpu)
1030 al->socket = env->cpu[al->cpu].socket_id;
1031 }
1024 1032
1025 if (al->map) { 1033 if (al->map) {
1026 struct dso *dso = al->map->dso; 1034 struct dso *dso = al->map->dso;
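
The final event.c hunk above attaches a socket id to each resolved sample: al->socket defaults to -1 and is filled from the topology table carried in machine->env when the sample has a valid CPU. A self-contained restatement of that lookup follows; the type and function names are illustrative, and the explicit nr_cpus bounds check is an extra guard not present in the hunk.

#include <stdio.h>

struct cpu_map_entry { int socket_id; int core_id; };
struct sample_env    { int nr_cpus; struct cpu_map_entry *cpu; };

/* Unknown CPU or missing topology table yields socket -1, just as the
 * hunk leaves al->socket at -1 in those cases. */
static int resolve_socket(const struct sample_env *env, int cpu)
{
	if (cpu < 0 || !env || !env->cpu || cpu >= env->nr_cpus)
		return -1;
	return env->cpu[cpu].socket_id;
}

int main(void)
{
	struct cpu_map_entry map[2] = { { 0, 0 }, { 1, 1 } };
	struct sample_env env = { 2, map };

	printf("%d\n", resolve_socket(&env, 1));	/* 1  */
	printf("%d\n", resolve_socket(&env, -1));	/* -1 */
	printf("%d\n", resolve_socket(NULL, 0));	/* -1 */
	return 0;
}
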
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index f729df5e25e6..a0dbcbd4f6d8 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -257,6 +257,7 @@ struct events_stats {
257 u64 total_non_filtered_period; 257 u64 total_non_filtered_period;
258 u64 total_lost; 258 u64 total_lost;
259 u64 total_lost_samples; 259 u64 total_lost_samples;
260 u64 total_aux_lost;
260 u64 total_invalid_chains; 261 u64 total_invalid_chains;
261 u32 nr_events[PERF_RECORD_HEADER_MAX]; 262 u32 nr_events[PERF_RECORD_HEADER_MAX];
262 u32 nr_non_filtered_samples; 263 u32 nr_non_filtered_samples;
@@ -478,6 +479,11 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
478 const struct perf_sample *sample, 479 const struct perf_sample *sample,
479 bool swapped); 480 bool swapped);
480 481
482pid_t perf_event__synthesize_comm(struct perf_tool *tool,
483 union perf_event *event, pid_t pid,
484 perf_event__handler_t process,
485 struct machine *machine);
486
481int perf_event__synthesize_mmap_events(struct perf_tool *tool, 487int perf_event__synthesize_mmap_events(struct perf_tool *tool,
482 union perf_event *event, 488 union perf_event *event,
483 pid_t pid, pid_t tgid, 489 pid_t pid, pid_t tgid,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c8fc8a258f42..d1392194a9a9 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,7 @@
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/hash.h> 26#include <linux/hash.h>
27#include <linux/log2.h> 27#include <linux/log2.h>
28#include <linux/err.h>
28 29
29static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx); 30static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
30static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx); 31static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
@@ -164,6 +165,13 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
164 __perf_evlist__propagate_maps(evlist, entry); 165 __perf_evlist__propagate_maps(evlist, entry);
165} 166}
166 167
168void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
169{
170 evsel->evlist = NULL;
171 list_del_init(&evsel->node);
172 evlist->nr_entries -= 1;
173}
174
167void perf_evlist__splice_list_tail(struct perf_evlist *evlist, 175void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
168 struct list_head *list) 176 struct list_head *list)
169{ 177{
@@ -197,6 +205,20 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
197 } 205 }
198} 206}
199 207
208void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
209{
210 attr->precise_ip = 3;
211
212 while (attr->precise_ip != 0) {
213 int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
214 if (fd != -1) {
215 close(fd);
216 break;
217 }
218 --attr->precise_ip;
219 }
220}
221
200int perf_evlist__add_default(struct perf_evlist *evlist) 222int perf_evlist__add_default(struct perf_evlist *evlist)
201{ 223{
202 struct perf_event_attr attr = { 224 struct perf_event_attr attr = {
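
perf_event_attr__set_max_precise_ip(), added above, probes how tightly the PMU can constrain sample skid: it asks for precise_ip = 3 and steps down until perf_event_open() accepts the attribute. The standalone sketch below reproduces the probe outside perf; the perf_event_open() wrapper is local because glibc provides none, and exclude_kernel is set only to make the probe more likely to succeed unprivileged.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* helps when running unprivileged */
	attr.precise_ip = 3;		/* ask for the strictest constraint */

	while (attr.precise_ip != 0) {
		int fd = perf_event_open(&attr, 0, -1, -1, 0);

		if (fd != -1) {
			close(fd);
			break;
		}
		attr.precise_ip--;	/* PMU refused it, relax and retry */
	}
	printf("max usable precise_ip: %u\n", (unsigned int)attr.precise_ip);
	return 0;
}
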
@@ -207,13 +229,15 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
207 229
208 event_attr_init(&attr); 230 event_attr_init(&attr);
209 231
232 perf_event_attr__set_max_precise_ip(&attr);
233
210 evsel = perf_evsel__new(&attr); 234 evsel = perf_evsel__new(&attr);
211 if (evsel == NULL) 235 if (evsel == NULL)
212 goto error; 236 goto error;
213 237
214 /* use strdup() because free(evsel) assumes name is allocated */ 238 /* use asprintf() because free(evsel) assumes name is allocated */
215 evsel->name = strdup("cycles"); 239 if (asprintf(&evsel->name, "cycles%.*s",
216 if (!evsel->name) 240 attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
217 goto error_free; 241 goto error_free;
218 242
219 perf_evlist__add(evlist, evsel); 243 perf_evlist__add(evlist, evsel);
@@ -293,7 +317,7 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
293{ 317{
294 struct perf_evsel *evsel = perf_evsel__newtp(sys, name); 318 struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
295 319
296 if (evsel == NULL) 320 if (IS_ERR(evsel))
297 return -1; 321 return -1;
298 322
299 evsel->handler = handler; 323 evsel->handler = handler;
@@ -616,6 +640,21 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
616 return NULL; 640 return NULL;
617} 641}
618 642
643struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
644 u64 id)
645{
646 struct perf_sample_id *sid;
647
648 if (!id)
649 return NULL;
650
651 sid = perf_evlist__id2sid(evlist, id);
652 if (sid)
653 return sid->evsel;
654
655 return NULL;
656}
657
619static int perf_evlist__event2id(struct perf_evlist *evlist, 658static int perf_evlist__event2id(struct perf_evlist *evlist,
620 union perf_event *event, u64 *id) 659 union perf_event *event, u64 *id)
621{ 660{
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 115d8b53c601..a459fe71b452 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -73,6 +73,7 @@ void perf_evlist__exit(struct perf_evlist *evlist);
73void perf_evlist__delete(struct perf_evlist *evlist); 73void perf_evlist__delete(struct perf_evlist *evlist);
74 74
75void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); 75void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
76void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);
76int perf_evlist__add_default(struct perf_evlist *evlist); 77int perf_evlist__add_default(struct perf_evlist *evlist);
77int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 78int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
78 struct perf_event_attr *attrs, size_t nr_attrs); 79 struct perf_event_attr *attrs, size_t nr_attrs);
@@ -104,6 +105,8 @@ int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mas
104int perf_evlist__poll(struct perf_evlist *evlist, int timeout); 105int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
105 106
106struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); 107struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
108struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
109 u64 id);
107 110
108struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); 111struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
109 112
@@ -287,4 +290,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
287 290
288void perf_evlist__set_tracking_event(struct perf_evlist *evlist, 291void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
289 struct perf_evsel *tracking_evsel); 292 struct perf_evsel *tracking_evsel);
293
294void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
290#endif /* __PERF_EVLIST_H */ 295#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 5410483d5219..397fb4ed3c97 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -9,10 +9,11 @@
9 9
10#include <byteswap.h> 10#include <byteswap.h>
11#include <linux/bitops.h> 11#include <linux/bitops.h>
12#include <api/fs/debugfs.h> 12#include <api/fs/tracing_path.h>
13#include <traceevent/event-parse.h> 13#include <traceevent/event-parse.h>
14#include <linux/hw_breakpoint.h> 14#include <linux/hw_breakpoint.h>
15#include <linux/perf_event.h> 15#include <linux/perf_event.h>
16#include <linux/err.h>
16#include <sys/resource.h> 17#include <sys/resource.h>
17#include "asm/bug.h" 18#include "asm/bug.h"
18#include "callchain.h" 19#include "callchain.h"
@@ -207,6 +208,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
207 evsel->unit = ""; 208 evsel->unit = "";
208 evsel->scale = 1.0; 209 evsel->scale = 1.0;
209 evsel->evlist = NULL; 210 evsel->evlist = NULL;
211 evsel->bpf_fd = -1;
210 INIT_LIST_HEAD(&evsel->node); 212 INIT_LIST_HEAD(&evsel->node);
211 INIT_LIST_HEAD(&evsel->config_terms); 213 INIT_LIST_HEAD(&evsel->config_terms);
212 perf_evsel__object.init(evsel); 214 perf_evsel__object.init(evsel);
@@ -225,11 +227,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
225 return evsel; 227 return evsel;
226} 228}
227 229
230/*
231 * Returns pointer with encoded error via <linux/err.h> interface.
232 */
228struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx) 233struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
229{ 234{
230 struct perf_evsel *evsel = zalloc(perf_evsel__object.size); 235 struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
236 int err = -ENOMEM;
231 237
232 if (evsel != NULL) { 238 if (evsel == NULL) {
239 goto out_err;
240 } else {
233 struct perf_event_attr attr = { 241 struct perf_event_attr attr = {
234 .type = PERF_TYPE_TRACEPOINT, 242 .type = PERF_TYPE_TRACEPOINT,
235 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 243 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
@@ -240,8 +248,10 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int
240 goto out_free; 248 goto out_free;
241 249
242 evsel->tp_format = trace_event__tp_format(sys, name); 250 evsel->tp_format = trace_event__tp_format(sys, name);
243 if (evsel->tp_format == NULL) 251 if (IS_ERR(evsel->tp_format)) {
252 err = PTR_ERR(evsel->tp_format);
244 goto out_free; 253 goto out_free;
254 }
245 255
246 event_attr_init(&attr); 256 event_attr_init(&attr);
247 attr.config = evsel->tp_format->id; 257 attr.config = evsel->tp_format->id;
@@ -254,7 +264,8 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int
254out_free: 264out_free:
255 zfree(&evsel->name); 265 zfree(&evsel->name);
256 free(evsel); 266 free(evsel);
257 return NULL; 267out_err:
268 return ERR_PTR(err);
258} 269}
259 270
260const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { 271const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
@@ -642,6 +653,15 @@ static void apply_config_terms(struct perf_evsel *evsel,
642 case PERF_EVSEL__CONFIG_TERM_STACK_USER: 653 case PERF_EVSEL__CONFIG_TERM_STACK_USER:
643 dump_size = term->val.stack_user; 654 dump_size = term->val.stack_user;
644 break; 655 break;
656 case PERF_EVSEL__CONFIG_TERM_INHERIT:
657 /*
 658 * attr->inherit should have already been set by
 659 * perf_evsel__config. If the user explicitly set
660 * inherit using config terms, override global
661 * opt->no_inherit setting.
662 */
663 attr->inherit = term->val.inherit ? 1 : 0;
664 break;
645 default: 665 default:
646 break; 666 break;
647 } 667 }
@@ -872,6 +892,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
872 attr->clockid = opts->clockid; 892 attr->clockid = opts->clockid;
873 } 893 }
874 894
895 if (evsel->precise_max)
896 perf_event_attr__set_max_precise_ip(attr);
897
875 /* 898 /*
876 * Apply event specific term settings, 899 * Apply event specific term settings,
877 * it overloads any global configuration. 900 * it overloads any global configuration.
@@ -1168,7 +1191,7 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
1168 bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), 1191 bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
1169 bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), 1192 bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
1170 bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), 1193 bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
1171 bit_name(IDENTIFIER), bit_name(REGS_INTR), 1194 bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
1172 { .name = NULL, } 1195 { .name = NULL, }
1173 }; 1196 };
1174#undef bit_name 1197#undef bit_name
@@ -1249,6 +1272,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
1249 PRINT_ATTRf(bp_type, p_unsigned); 1272 PRINT_ATTRf(bp_type, p_unsigned);
1250 PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex); 1273 PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
1251 PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex); 1274 PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
1275 PRINT_ATTRf(branch_sample_type, p_unsigned);
1252 PRINT_ATTRf(sample_regs_user, p_hex); 1276 PRINT_ATTRf(sample_regs_user, p_hex);
1253 PRINT_ATTRf(sample_stack_user, p_unsigned); 1277 PRINT_ATTRf(sample_stack_user, p_unsigned);
1254 PRINT_ATTRf(clockid, p_signed); 1278 PRINT_ATTRf(clockid, p_signed);
@@ -1333,6 +1357,22 @@ retry_open:
1333 err); 1357 err);
1334 goto try_fallback; 1358 goto try_fallback;
1335 } 1359 }
1360
1361 if (evsel->bpf_fd >= 0) {
1362 int evt_fd = FD(evsel, cpu, thread);
1363 int bpf_fd = evsel->bpf_fd;
1364
1365 err = ioctl(evt_fd,
1366 PERF_EVENT_IOC_SET_BPF,
1367 bpf_fd);
1368 if (err && errno != EEXIST) {
1369 pr_err("failed to attach bpf fd %d: %s\n",
1370 bpf_fd, strerror(errno));
1371 err = -EINVAL;
1372 goto out_close;
1373 }
1374 }
1375
1336 set_rlimit = NO_CHANGE; 1376 set_rlimit = NO_CHANGE;
1337 1377
1338 /* 1378 /*
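
perf_evsel__newtp_idx() now reports its failure cause encoded in the returned pointer, following the <linux/err.h> convention (the perf build uses a userspace copy of that header from tools/include), and callers such as perf_evlist__add_newtp() switch from NULL checks to IS_ERR(). Below is a self-contained sketch of the convention; the ERR_PTR()/IS_ERR()/PTR_ERR() stand-ins are local so the example compiles on its own, and widget__new() is hypothetical.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for the <linux/err.h> helpers used in the hunks above. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct widget { int id; };

static struct widget *widget__new(int id)
{
	struct widget *w;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* invalid argument */
	w = calloc(1, sizeof(*w));
	if (!w)
		return ERR_PTR(-ENOMEM);	/* allocation failure */
	w->id = id;
	return w;
}

int main(void)
{
	struct widget *w = widget__new(-1);

	if (IS_ERR(w))
		printf("error: %ld\n", PTR_ERR(w));	/* -22 (EINVAL) */
	else
		free(w);
	return 0;
}
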
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index ef8925f7211a..0e49bd742c63 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -43,6 +43,7 @@ enum {
43 PERF_EVSEL__CONFIG_TERM_TIME, 43 PERF_EVSEL__CONFIG_TERM_TIME,
44 PERF_EVSEL__CONFIG_TERM_CALLGRAPH, 44 PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
45 PERF_EVSEL__CONFIG_TERM_STACK_USER, 45 PERF_EVSEL__CONFIG_TERM_STACK_USER,
46 PERF_EVSEL__CONFIG_TERM_INHERIT,
46 PERF_EVSEL__CONFIG_TERM_MAX, 47 PERF_EVSEL__CONFIG_TERM_MAX,
47}; 48};
48 49
@@ -55,6 +56,7 @@ struct perf_evsel_config_term {
55 bool time; 56 bool time;
56 char *callgraph; 57 char *callgraph;
57 u64 stack_user; 58 u64 stack_user;
59 bool inherit;
58 } val; 60 } val;
59}; 61};
60 62
@@ -90,9 +92,9 @@ struct perf_evsel {
90 double scale; 92 double scale;
91 const char *unit; 93 const char *unit;
92 struct event_format *tp_format; 94 struct event_format *tp_format;
95 off_t id_offset;
93 union { 96 union {
94 void *priv; 97 void *priv;
95 off_t id_offset;
96 u64 db_id; 98 u64 db_id;
97 }; 99 };
98 struct cgroup_sel *cgrp; 100 struct cgroup_sel *cgrp;
@@ -111,6 +113,7 @@ struct perf_evsel {
111 bool system_wide; 113 bool system_wide;
112 bool tracking; 114 bool tracking;
113 bool per_pkg; 115 bool per_pkg;
116 bool precise_max;
114 /* parse modifier helper */ 117 /* parse modifier helper */
115 int exclude_GH; 118 int exclude_GH;
116 int nr_members; 119 int nr_members;
@@ -120,6 +123,7 @@ struct perf_evsel {
120 char *group_name; 123 char *group_name;
121 bool cmdline_group_boundary; 124 bool cmdline_group_boundary;
122 struct list_head config_terms; 125 struct list_head config_terms;
126 int bpf_fd;
123}; 127};
124 128
125union u64_swap { 129union u64_swap {
@@ -130,7 +134,6 @@ union u64_swap {
130struct cpu_map; 134struct cpu_map;
131struct target; 135struct target;
132struct thread_map; 136struct thread_map;
133struct perf_evlist;
134struct record_opts; 137struct record_opts;
135 138
136static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) 139static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
@@ -162,6 +165,9 @@ static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
162 165
163struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx); 166struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
164 167
168/*
169 * Returns pointer with encoded error via <linux/err.h> interface.
170 */
165static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name) 171static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
166{ 172{
167 return perf_evsel__newtp_idx(sys, name, 0); 173 return perf_evsel__newtp_idx(sys, name, 0);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index fce6634aebe2..43838003c1a1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -24,9 +24,6 @@
24#include "build-id.h" 24#include "build-id.h"
25#include "data.h" 25#include "data.h"
26 26
27static u32 header_argc;
28static const char **header_argv;
29
30/* 27/*
31 * magic2 = "PERFILE2" 28 * magic2 = "PERFILE2"
32 * must be a numerical value to let the endianness 29 * must be a numerical value to let the endianness
@@ -88,6 +85,9 @@ int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
88 return err; 85 return err;
89} 86}
90 87
88#define string_size(str) \
89 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
90
91static int do_write_string(int fd, const char *str) 91static int do_write_string(int fd, const char *str)
92{ 92{
93 u32 len, olen; 93 u32 len, olen;
@@ -135,37 +135,6 @@ static char *do_read_string(int fd, struct perf_header *ph)
135 return NULL; 135 return NULL;
136} 136}
137 137
138int
139perf_header__set_cmdline(int argc, const char **argv)
140{
141 int i;
142
143 /*
144 * If header_argv has already been set, do not override it.
145 * This allows a command to set the cmdline, parse args and
146 * then call another builtin function that implements a
147 * command -- e.g, cmd_kvm calling cmd_record.
148 */
149 if (header_argv)
150 return 0;
151
152 header_argc = (u32)argc;
153
154 /* do not include NULL termination */
155 header_argv = calloc(argc, sizeof(char *));
156 if (!header_argv)
157 return -ENOMEM;
158
159 /*
160 * must copy argv contents because it gets moved
161 * around during option parsing
162 */
163 for (i = 0; i < argc ; i++)
164 header_argv[i] = argv[i];
165
166 return 0;
167}
168
169static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, 138static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
170 struct perf_evlist *evlist) 139 struct perf_evlist *evlist)
171{ 140{
@@ -402,8 +371,8 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
402{ 371{
403 char buf[MAXPATHLEN]; 372 char buf[MAXPATHLEN];
404 char proc[32]; 373 char proc[32];
405 u32 i, n; 374 u32 n;
406 int ret; 375 int i, ret;
407 376
408 /* 377 /*
409 * actual path to perf binary 378 * actual path to perf binary
@@ -417,7 +386,7 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
417 buf[ret] = '\0'; 386 buf[ret] = '\0';
418 387
419 /* account for binary path */ 388 /* account for binary path */
420 n = header_argc + 1; 389 n = perf_env.nr_cmdline + 1;
421 390
422 ret = do_write(fd, &n, sizeof(n)); 391 ret = do_write(fd, &n, sizeof(n));
423 if (ret < 0) 392 if (ret < 0)
@@ -427,8 +396,8 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
427 if (ret < 0) 396 if (ret < 0)
428 return ret; 397 return ret;
429 398
430 for (i = 0 ; i < header_argc; i++) { 399 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
431 ret = do_write_string(fd, header_argv[i]); 400 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
432 if (ret < 0) 401 if (ret < 0)
433 return ret; 402 return ret;
434 } 403 }
@@ -441,6 +410,7 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
441 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" 410 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
442 411
443struct cpu_topo { 412struct cpu_topo {
413 u32 cpu_nr;
444 u32 core_sib; 414 u32 core_sib;
445 u32 thread_sib; 415 u32 thread_sib;
446 char **core_siblings; 416 char **core_siblings;
@@ -551,7 +521,7 @@ static struct cpu_topo *build_cpu_topology(void)
551 return NULL; 521 return NULL;
552 522
553 tp = addr; 523 tp = addr;
554 524 tp->cpu_nr = nr;
555 addr += sizeof(*tp); 525 addr += sizeof(*tp);
556 tp->core_siblings = addr; 526 tp->core_siblings = addr;
557 addr += sz; 527 addr += sz;
@@ -574,7 +544,7 @@ static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
574{ 544{
575 struct cpu_topo *tp; 545 struct cpu_topo *tp;
576 u32 i; 546 u32 i;
577 int ret; 547 int ret, j;
578 548
579 tp = build_cpu_topology(); 549 tp = build_cpu_topology();
580 if (!tp) 550 if (!tp)
@@ -598,6 +568,21 @@ static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
598 if (ret < 0) 568 if (ret < 0)
599 break; 569 break;
600 } 570 }
571
572 ret = perf_env__read_cpu_topology_map(&perf_env);
573 if (ret < 0)
574 goto done;
575
576 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
577 ret = do_write(fd, &perf_env.cpu[j].core_id,
578 sizeof(perf_env.cpu[j].core_id));
579 if (ret < 0)
580 return ret;
581 ret = do_write(fd, &perf_env.cpu[j].socket_id,
582 sizeof(perf_env.cpu[j].socket_id));
583 if (ret < 0)
584 return ret;
585 }
601done: 586done:
602 free_cpu_topo(tp); 587 free_cpu_topo(tp);
603 return ret; 588 return ret;
@@ -938,6 +923,7 @@ static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
938{ 923{
939 int nr, i; 924 int nr, i;
940 char *str; 925 char *str;
926 int cpu_nr = ph->env.nr_cpus_online;
941 927
942 nr = ph->env.nr_sibling_cores; 928 nr = ph->env.nr_sibling_cores;
943 str = ph->env.sibling_cores; 929 str = ph->env.sibling_cores;
@@ -954,6 +940,13 @@ static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
954 fprintf(fp, "# sibling threads : %s\n", str); 940 fprintf(fp, "# sibling threads : %s\n", str);
955 str += strlen(str) + 1; 941 str += strlen(str) + 1;
956 } 942 }
943
944 if (ph->env.cpu != NULL) {
945 for (i = 0; i < cpu_nr; i++)
946 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
947 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
948 } else
949 fprintf(fp, "# Core ID and Socket ID information is not available\n");
957} 950}
958 951
959static void free_event_desc(struct perf_evsel *events) 952static void free_event_desc(struct perf_evsel *events)
@@ -1582,7 +1575,7 @@ error:
1582 return -1; 1575 return -1;
1583} 1576}
1584 1577
1585static int process_cpu_topology(struct perf_file_section *section __maybe_unused, 1578static int process_cpu_topology(struct perf_file_section *section,
1586 struct perf_header *ph, int fd, 1579 struct perf_header *ph, int fd,
1587 void *data __maybe_unused) 1580 void *data __maybe_unused)
1588{ 1581{
@@ -1590,15 +1583,22 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1590 u32 nr, i; 1583 u32 nr, i;
1591 char *str; 1584 char *str;
1592 struct strbuf sb; 1585 struct strbuf sb;
1586 int cpu_nr = ph->env.nr_cpus_online;
1587 u64 size = 0;
1588
1589 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1590 if (!ph->env.cpu)
1591 return -1;
1593 1592
1594 ret = readn(fd, &nr, sizeof(nr)); 1593 ret = readn(fd, &nr, sizeof(nr));
1595 if (ret != sizeof(nr)) 1594 if (ret != sizeof(nr))
1596 return -1; 1595 goto free_cpu;
1597 1596
1598 if (ph->needs_swap) 1597 if (ph->needs_swap)
1599 nr = bswap_32(nr); 1598 nr = bswap_32(nr);
1600 1599
1601 ph->env.nr_sibling_cores = nr; 1600 ph->env.nr_sibling_cores = nr;
1601 size += sizeof(u32);
1602 strbuf_init(&sb, 128); 1602 strbuf_init(&sb, 128);
1603 1603
1604 for (i = 0; i < nr; i++) { 1604 for (i = 0; i < nr; i++) {
@@ -1608,6 +1608,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1608 1608
1609 /* include a NULL character at the end */ 1609 /* include a NULL character at the end */
1610 strbuf_add(&sb, str, strlen(str) + 1); 1610 strbuf_add(&sb, str, strlen(str) + 1);
1611 size += string_size(str);
1611 free(str); 1612 free(str);
1612 } 1613 }
1613 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 1614 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
@@ -1620,6 +1621,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1620 nr = bswap_32(nr); 1621 nr = bswap_32(nr);
1621 1622
1622 ph->env.nr_sibling_threads = nr; 1623 ph->env.nr_sibling_threads = nr;
1624 size += sizeof(u32);
1623 1625
1624 for (i = 0; i < nr; i++) { 1626 for (i = 0; i < nr; i++) {
1625 str = do_read_string(fd, ph); 1627 str = do_read_string(fd, ph);
@@ -1628,13 +1630,57 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
1628 1630
1629 /* include a NULL character at the end */ 1631 /* include a NULL character at the end */
1630 strbuf_add(&sb, str, strlen(str) + 1); 1632 strbuf_add(&sb, str, strlen(str) + 1);
1633 size += string_size(str);
1631 free(str); 1634 free(str);
1632 } 1635 }
1633 ph->env.sibling_threads = strbuf_detach(&sb, NULL); 1636 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1637
1638 /*
1639 * The header may be from old perf,
1640 * which doesn't include core id and socket id information.
1641 */
1642 if (section->size <= size) {
1643 zfree(&ph->env.cpu);
1644 return 0;
1645 }
1646
1647 for (i = 0; i < (u32)cpu_nr; i++) {
1648 ret = readn(fd, &nr, sizeof(nr));
1649 if (ret != sizeof(nr))
1650 goto free_cpu;
1651
1652 if (ph->needs_swap)
1653 nr = bswap_32(nr);
1654
1655 if (nr > (u32)cpu_nr) {
 1656 pr_debug("core_id number is too big. "
1657 "You may need to upgrade the perf tool.\n");
1658 goto free_cpu;
1659 }
1660 ph->env.cpu[i].core_id = nr;
1661
1662 ret = readn(fd, &nr, sizeof(nr));
1663 if (ret != sizeof(nr))
1664 goto free_cpu;
1665
1666 if (ph->needs_swap)
1667 nr = bswap_32(nr);
1668
1669 if (nr > (u32)cpu_nr) {
 1670 pr_debug("socket_id number is too big. "
1671 "You may need to upgrade the perf tool.\n");
1672 goto free_cpu;
1673 }
1674
1675 ph->env.cpu[i].socket_id = nr;
1676 }
1677
1634 return 0; 1678 return 0;
1635 1679
1636error: 1680error:
1637 strbuf_release(&sb); 1681 strbuf_release(&sb);
1682free_cpu:
1683 zfree(&ph->env.cpu);
1638 return -1; 1684 return -1;
1639} 1685}
1640 1686
@@ -1737,6 +1783,9 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
1737 /* include a NULL character at the end */ 1783 /* include a NULL character at the end */
1738 strbuf_add(&sb, "", 1); 1784 strbuf_add(&sb, "", 1);
1739 1785
1786 if (!strcmp(name, "msr"))
1787 ph->env.msr_pmu_type = type;
1788
1740 free(name); 1789 free(name);
1741 pmu_num--; 1790 pmu_num--;
1742 } 1791 }
@@ -2515,6 +2564,7 @@ int perf_session__read_header(struct perf_session *session)
2515 return -ENOMEM; 2564 return -ENOMEM;
2516 2565
2517 session->evlist->env = &header->env; 2566 session->evlist->env = &header->env;
2567 session->machines.host.env = &header->env;
2518 if (perf_data_file__is_pipe(file)) 2568 if (perf_data_file__is_pipe(file))
2519 return perf_header__read_pipe(session); 2569 return perf_header__read_pipe(session);
2520 2570
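
The header.c changes above extend the CPU_TOPOLOGY section: after the existing core- and thread-sibling strings, write_cpu_topology() now emits a core_id/socket_id pair per available CPU, and process_cpu_topology() reads that pair block only if the section is large enough to contain it, so old perf.data files keep working. The backward-compatibility test relies on string_size() matching exactly what do_write_string() emits (a u32 length plus the string padded to NAME_ALIGN). A small standalone check of that accounting, assuming NAME_ALIGN is 64 (its value is not shown in these hunks):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NAME_ALIGN	64		/* assumed value, for illustration */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))
#define string_size(str) \
	(ALIGN_UP(strlen(str) + 1, NAME_ALIGN) + sizeof(uint32_t))

int main(void)
{
	/* each sibling-list string occupies its aligned payload plus the
	 * u32 length prefix, e.g. "0-3" -> 64 + 4 = 68 bytes on disk */
	printf("%zu\n", string_size("0-3"));		/* 68 */
	printf("%zu\n", string_size("0,32,64,96"));	/* still 68 */
	return 0;
}
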
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 396e4965f0c9..05f27cb6b7e3 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -7,7 +7,7 @@
7#include <linux/bitmap.h> 7#include <linux/bitmap.h>
8#include <linux/types.h> 8#include <linux/types.h>
9#include "event.h" 9#include "event.h"
10 10#include "env.h"
11 11
12enum { 12enum {
13 HEADER_RESERVED = 0, /* always cleared */ 13 HEADER_RESERVED = 0, /* always cleared */
@@ -66,31 +66,6 @@ struct perf_header;
66int perf_file_header__read(struct perf_file_header *header, 66int perf_file_header__read(struct perf_file_header *header,
67 struct perf_header *ph, int fd); 67 struct perf_header *ph, int fd);
68 68
69struct perf_env {
70 char *hostname;
71 char *os_release;
72 char *version;
73 char *arch;
74 int nr_cpus_online;
75 int nr_cpus_avail;
76 char *cpu_desc;
77 char *cpuid;
78 unsigned long long total_mem;
79
80 int nr_cmdline;
81 int nr_sibling_cores;
82 int nr_sibling_threads;
83 int nr_numa_nodes;
84 int nr_pmu_mappings;
85 int nr_groups;
86 char *cmdline;
87 const char **cmdline_argv;
88 char *sibling_cores;
89 char *sibling_threads;
90 char *numa_nodes;
91 char *pmu_mappings;
92};
93
94struct perf_header { 69struct perf_header {
95 enum perf_header_version version; 70 enum perf_header_version version;
96 bool needs_swap; 71 bool needs_swap;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 08b6cd945f1e..4fd37d6708cb 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -15,6 +15,8 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
15 struct hist_entry *he); 15 struct hist_entry *he);
16static bool hists__filter_entry_by_symbol(struct hists *hists, 16static bool hists__filter_entry_by_symbol(struct hists *hists,
17 struct hist_entry *he); 17 struct hist_entry *he);
18static bool hists__filter_entry_by_socket(struct hists *hists,
19 struct hist_entry *he);
18 20
19u16 hists__col_len(struct hists *hists, enum hist_column col) 21u16 hists__col_len(struct hists *hists, enum hist_column col)
20{ 22{
@@ -130,6 +132,18 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
130 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, 132 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
131 symlen); 133 symlen);
132 } 134 }
135
136 if (h->mem_info->iaddr.sym) {
137 symlen = (int)h->mem_info->iaddr.sym->namelen + 4
138 + unresolved_col_width + 2;
139 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
140 symlen);
141 } else {
142 symlen = unresolved_col_width + 4 + 2;
143 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
144 symlen);
145 }
146
133 if (h->mem_info->daddr.map) { 147 if (h->mem_info->daddr.map) {
134 symlen = dso__name_len(h->mem_info->daddr.map->dso); 148 symlen = dso__name_len(h->mem_info->daddr.map->dso);
135 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, 149 hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
@@ -141,9 +155,12 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
141 } else { 155 } else {
142 symlen = unresolved_col_width + 4 + 2; 156 symlen = unresolved_col_width + 4 + 2;
143 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); 157 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
158 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
144 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); 159 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
145 } 160 }
146 161
162 hists__new_col_len(hists, HISTC_CPU, 3);
163 hists__new_col_len(hists, HISTC_SOCKET, 6);
147 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); 164 hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
148 hists__new_col_len(hists, HISTC_MEM_TLB, 22); 165 hists__new_col_len(hists, HISTC_MEM_TLB, 22);
149 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); 166 hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
@@ -452,6 +469,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
452 .map = al->map, 469 .map = al->map,
453 .sym = al->sym, 470 .sym = al->sym,
454 }, 471 },
472 .socket = al->socket,
455 .cpu = al->cpu, 473 .cpu = al->cpu,
456 .cpumode = al->cpumode, 474 .cpumode = al->cpumode,
457 .ip = al->addr, 475 .ip = al->addr,
@@ -690,7 +708,7 @@ iter_finish_normal_entry(struct hist_entry_iter *iter,
690} 708}
691 709
692static int 710static int
693iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, 711iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
694 struct addr_location *al __maybe_unused) 712 struct addr_location *al __maybe_unused)
695{ 713{
696 struct hist_entry **he_cache; 714 struct hist_entry **he_cache;
@@ -702,7 +720,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
702 * cumulated only one time to prevent entries more than 100% 720 * cumulated only one time to prevent entries more than 100%
703 * overhead. 721 * overhead.
704 */ 722 */
705 he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1)); 723 he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
706 if (he_cache == NULL) 724 if (he_cache == NULL)
707 return -ENOMEM; 725 return -ENOMEM;
708 726
@@ -863,6 +881,8 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
863 if (err) 881 if (err)
864 return err; 882 return err;
865 883
884 iter->max_stack = max_stack_depth;
885
866 err = iter->ops->prepare_entry(iter, al); 886 err = iter->ops->prepare_entry(iter, al);
867 if (err) 887 if (err)
868 goto out; 888 goto out;
@@ -1024,6 +1044,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1024 hists__filter_entry_by_dso(hists, he); 1044 hists__filter_entry_by_dso(hists, he);
1025 hists__filter_entry_by_thread(hists, he); 1045 hists__filter_entry_by_thread(hists, he);
1026 hists__filter_entry_by_symbol(hists, he); 1046 hists__filter_entry_by_symbol(hists, he);
1047 hists__filter_entry_by_socket(hists, he);
1027} 1048}
1028 1049
1029void hists__collapse_resort(struct hists *hists, struct ui_progress *prog) 1050void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
@@ -1143,7 +1164,7 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1143 struct perf_evsel *evsel = hists_to_evsel(hists); 1164 struct perf_evsel *evsel = hists_to_evsel(hists);
1144 bool use_callchain; 1165 bool use_callchain;
1145 1166
1146 if (evsel && !symbol_conf.show_ref_callgraph) 1167 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1147 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN; 1168 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1148 else 1169 else
1149 use_callchain = symbol_conf.use_callchain; 1170 use_callchain = symbol_conf.use_callchain;
@@ -1292,6 +1313,37 @@ void hists__filter_by_symbol(struct hists *hists)
1292 } 1313 }
1293} 1314}
1294 1315
1316static bool hists__filter_entry_by_socket(struct hists *hists,
1317 struct hist_entry *he)
1318{
1319 if ((hists->socket_filter > -1) &&
1320 (he->socket != hists->socket_filter)) {
1321 he->filtered |= (1 << HIST_FILTER__SOCKET);
1322 return true;
1323 }
1324
1325 return false;
1326}
1327
1328void hists__filter_by_socket(struct hists *hists)
1329{
1330 struct rb_node *nd;
1331
1332 hists->stats.nr_non_filtered_samples = 0;
1333
1334 hists__reset_filter_stats(hists);
1335 hists__reset_col_len(hists);
1336
1337 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1338 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1339
1340 if (hists__filter_entry_by_socket(hists, h))
1341 continue;
1342
1343 hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
1344 }
1345}
1346
1295void events_stats__inc(struct events_stats *stats, u32 type) 1347void events_stats__inc(struct events_stats *stats, u32 type)
1296{ 1348{
1297 ++stats->nr_events[0]; 1349 ++stats->nr_events[0];
@@ -1517,6 +1569,7 @@ static int hists_evsel__init(struct perf_evsel *evsel)
1517 hists->entries_collapsed = RB_ROOT; 1569 hists->entries_collapsed = RB_ROOT;
1518 hists->entries = RB_ROOT; 1570 hists->entries = RB_ROOT;
1519 pthread_mutex_init(&hists->lock, NULL); 1571 pthread_mutex_init(&hists->lock, NULL);
1572 hists->socket_filter = -1;
1520 return 0; 1573 return 0;
1521} 1574}
1522 1575
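
hists__filter_by_socket() above plugs into the existing per-dimension filter scheme: each predicate that rejects an entry sets its own bit in he->filtered, the entry is shown only while no bits are set, and hists__remove_entry_filter() clears the bit again once the predicate passes. A stand-alone restatement of the pattern with illustrative names (this is not the perf code):

#include <stdio.h>

enum { FILTER_DSO = 1 << 0, FILTER_THREAD = 1 << 1, FILTER_SOCKET = 1 << 2 };

struct entry { int socket; unsigned int filtered; };

/* Returns nonzero when the entry is hidden by the socket filter; a filter
 * value of -1 means "no socket filter is active". */
static int filter_by_socket(struct entry *e, int socket_filter)
{
	if (socket_filter > -1 && e->socket != socket_filter) {
		e->filtered |= FILTER_SOCKET;
		return 1;
	}
	/* the real code clears the bit via hists__remove_entry_filter() */
	e->filtered &= ~FILTER_SOCKET;
	return 0;
}

int main(void)
{
	struct entry e = { .socket = 1, .filtered = 0 };

	printf("%d\n", filter_by_socket(&e, 0));	/* 1: hidden    */
	printf("%d\n", filter_by_socket(&e, 1));	/* 0: visible   */
	printf("%d\n", filter_by_socket(&e, -1));	/* 0: no filter */
	return 0;
}
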
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index de6d58e7f0d5..a48a2078d288 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -20,6 +20,7 @@ enum hist_filter {
20 HIST_FILTER__SYMBOL, 20 HIST_FILTER__SYMBOL,
21 HIST_FILTER__GUEST, 21 HIST_FILTER__GUEST,
22 HIST_FILTER__HOST, 22 HIST_FILTER__HOST,
23 HIST_FILTER__SOCKET,
23}; 24};
24 25
25enum hist_column { 26enum hist_column {
@@ -29,6 +30,7 @@ enum hist_column {
29 HISTC_COMM, 30 HISTC_COMM,
30 HISTC_PARENT, 31 HISTC_PARENT,
31 HISTC_CPU, 32 HISTC_CPU,
33 HISTC_SOCKET,
32 HISTC_SRCLINE, 34 HISTC_SRCLINE,
33 HISTC_SRCFILE, 35 HISTC_SRCFILE,
34 HISTC_MISPREDICT, 36 HISTC_MISPREDICT,
@@ -47,6 +49,7 @@ enum hist_column {
47 HISTC_MEM_LVL, 49 HISTC_MEM_LVL,
48 HISTC_MEM_SNOOP, 50 HISTC_MEM_SNOOP,
49 HISTC_MEM_DCACHELINE, 51 HISTC_MEM_DCACHELINE,
52 HISTC_MEM_IADDR_SYMBOL,
50 HISTC_TRANSACTION, 53 HISTC_TRANSACTION,
51 HISTC_CYCLES, 54 HISTC_CYCLES,
52 HISTC_NR_COLS, /* Last entry */ 55 HISTC_NR_COLS, /* Last entry */
@@ -70,6 +73,7 @@ struct hists {
70 struct events_stats stats; 73 struct events_stats stats;
71 u64 event_stream; 74 u64 event_stream;
72 u16 col_len[HISTC_NR_COLS]; 75 u16 col_len[HISTC_NR_COLS];
76 int socket_filter;
73}; 77};
74 78
75struct hist_entry_iter; 79struct hist_entry_iter;
@@ -87,6 +91,7 @@ struct hist_entry_iter {
87 int curr; 91 int curr;
88 92
89 bool hide_unresolved; 93 bool hide_unresolved;
94 int max_stack;
90 95
91 struct perf_evsel *evsel; 96 struct perf_evsel *evsel;
92 struct perf_sample *sample; 97 struct perf_sample *sample;
@@ -144,11 +149,12 @@ size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp);
144void hists__filter_by_dso(struct hists *hists); 149void hists__filter_by_dso(struct hists *hists);
145void hists__filter_by_thread(struct hists *hists); 150void hists__filter_by_thread(struct hists *hists);
146void hists__filter_by_symbol(struct hists *hists); 151void hists__filter_by_symbol(struct hists *hists);
152void hists__filter_by_socket(struct hists *hists);
147 153
148static inline bool hists__has_filter(struct hists *hists) 154static inline bool hists__has_filter(struct hists *hists)
149{ 155{
150 return hists->thread_filter || hists->dso_filter || 156 return hists->thread_filter || hists->dso_filter ||
151 hists->symbol_filter_str; 157 hists->symbol_filter_str || (hists->socket_filter > -1);
152} 158}
153 159
154u16 hists__col_len(struct hists *hists, enum hist_column col); 160u16 hists__col_len(struct hists *hists, enum hist_column col);
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
index 8f149655f497..07c644ed64c4 100644
--- a/tools/perf/util/include/dwarf-regs.h
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -5,4 +5,12 @@
5const char *get_arch_regstr(unsigned int n); 5const char *get_arch_regstr(unsigned int n);
6#endif 6#endif
7 7
8#ifdef HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
9/*
10 * Arch should support fetching the offset of a register in pt_regs
11 * by its name. See kernel's regs_query_register_offset in
12 * arch/xxx/kernel/ptrace.c.
13 */
14int regs_query_register_offset(const char *name);
15#endif
8#endif 16#endif
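
The new dwarf-regs.h declaration lets architectures that define HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET translate a register name into its pt_regs offset, mirroring the kernel-side helper the comment points to. A typical implementation is a static name-to-offset table; the sketch below uses a made-up pt_regs_demo layout and register names, purely for illustration.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Made-up register layout; a real arch would use its own struct pt_regs. */
struct pt_regs_demo { unsigned long r0, r1, sp, pc; };

#define REG_OFFSET(r)	{ #r, offsetof(struct pt_regs_demo, r) }

static const struct { const char *name; int offset; } regoffset_table[] = {
	REG_OFFSET(r0), REG_OFFSET(r1), REG_OFFSET(sp), REG_OFFSET(pc),
};

static int regs_query_register_offset(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(regoffset_table) / sizeof(regoffset_table[0]); i++)
		if (!strcmp(regoffset_table[i].name, name))
			return regoffset_table[i].offset;
	return -1;	/* unknown register name */
}

int main(void)
{
	printf("sp offset: %d\n", regs_query_register_offset("sp"));
	printf("bogus:     %d\n", regs_query_register_offset("xyz"));	/* -1 */
	return 0;
}
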
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 2386322ece4f..0611d619a42e 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -7,6 +7,17 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
7 $(call rule_mkdir) 7 $(call rule_mkdir)
8 @$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@ 8 @$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
9 9
10$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c 10$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
11 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \
12 diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
13 diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
14 diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
15 diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
16 diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
17 diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
18 diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
19 || echo "Warning: Intel PT: x86 instruction decoder differs from kernel" >&2 )) || true
20 $(call rule_mkdir)
21 $(call if_changed_dep,cc_o_c)
11 22
12CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder -Wno-override-init 23CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder -Wno-override-init
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 22ba50224319..9409d014b46c 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -650,7 +650,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
650 if (data->from_mtc && timestamp < data->timestamp && 650 if (data->from_mtc && timestamp < data->timestamp &&
651 data->timestamp - timestamp < decoder->tsc_slip) 651 data->timestamp - timestamp < decoder->tsc_slip)
652 return 1; 652 return 1;
653 while (timestamp < data->timestamp) 653 if (timestamp < data->timestamp)
654 timestamp += (1ULL << 56); 654 timestamp += (1ULL << 56);
655 if (pkt_info->last_packet_type != INTEL_PT_CYC) { 655 if (pkt_info->last_packet_type != INTEL_PT_CYC) {
656 if (data->from_mtc) 656 if (data->from_mtc)
@@ -1191,7 +1191,7 @@ static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
1191 timestamp); 1191 timestamp);
1192 timestamp = decoder->timestamp; 1192 timestamp = decoder->timestamp;
1193 } 1193 }
1194 while (timestamp < decoder->timestamp) { 1194 if (timestamp < decoder->timestamp) {
1195 intel_pt_log_to("Wraparound timestamp", timestamp); 1195 intel_pt_log_to("Wraparound timestamp", timestamp);
1196 timestamp += (1ULL << 56); 1196 timestamp += (1ULL << 56);
1197 decoder->tsc_timestamp = timestamp; 1197 decoder->tsc_timestamp = timestamp;
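
Both intel-pt-decoder.c hunks above turn a while loop into a single conditional add: the timestamps carried in the trace are truncated to 56 bits, and when a later value compares smaller than its predecessor only one wrap of 2^56 is legitimate, so adding the constant once suffices (repeating it, as the loop did, could only over-correct on bad data). A worked example of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t prev = (1ULL << 56) - 10;	/* just before the wrap */
	uint64_t cur  = 5;			/* truncated value after it */

	if (cur < prev)
		cur += 1ULL << 56;		/* single wraparound assumed */

	printf("delta = %llu\n", (unsigned long long)(cur - prev));	/* 15 */
	return 0;
}
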
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
index d09c7d9f9050..319bef33a64b 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -29,18 +29,18 @@
29 29
30static FILE *f; 30static FILE *f;
31static char log_name[MAX_LOG_NAME]; 31static char log_name[MAX_LOG_NAME];
32static bool enable_logging; 32bool intel_pt_enable_logging;
33 33
34void intel_pt_log_enable(void) 34void intel_pt_log_enable(void)
35{ 35{
36 enable_logging = true; 36 intel_pt_enable_logging = true;
37} 37}
38 38
39void intel_pt_log_disable(void) 39void intel_pt_log_disable(void)
40{ 40{
41 if (f) 41 if (f)
42 fflush(f); 42 fflush(f);
43 enable_logging = false; 43 intel_pt_enable_logging = false;
44} 44}
45 45
46void intel_pt_log_set_name(const char *name) 46void intel_pt_log_set_name(const char *name)
@@ -80,7 +80,7 @@ static void intel_pt_print_no_data(uint64_t pos, int indent)
80 80
81static int intel_pt_log_open(void) 81static int intel_pt_log_open(void)
82{ 82{
83 if (!enable_logging) 83 if (!intel_pt_enable_logging)
84 return -1; 84 return -1;
85 85
86 if (f) 86 if (f)
@@ -91,15 +91,15 @@ static int intel_pt_log_open(void)
91 91
92 f = fopen(log_name, "w+"); 92 f = fopen(log_name, "w+");
93 if (!f) { 93 if (!f) {
94 enable_logging = false; 94 intel_pt_enable_logging = false;
95 return -1; 95 return -1;
96 } 96 }
97 97
98 return 0; 98 return 0;
99} 99}
100 100
101void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len, 101void __intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
102 uint64_t pos, const unsigned char *buf) 102 uint64_t pos, const unsigned char *buf)
103{ 103{
104 char desc[INTEL_PT_PKT_DESC_MAX]; 104 char desc[INTEL_PT_PKT_DESC_MAX];
105 105
@@ -111,7 +111,7 @@ void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
111 fprintf(f, "%s\n", desc); 111 fprintf(f, "%s\n", desc);
112} 112}
113 113
114void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip) 114void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
115{ 115{
116 char desc[INTEL_PT_INSN_DESC_MAX]; 116 char desc[INTEL_PT_INSN_DESC_MAX];
117 size_t len = intel_pt_insn->length; 117 size_t len = intel_pt_insn->length;
@@ -128,7 +128,8 @@ void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
128 fprintf(f, "Bad instruction!\n"); 128 fprintf(f, "Bad instruction!\n");
129} 129}
130 130
131void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn, uint64_t ip) 131void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
132 uint64_t ip)
132{ 133{
133 char desc[INTEL_PT_INSN_DESC_MAX]; 134 char desc[INTEL_PT_INSN_DESC_MAX];
134 135
@@ -142,7 +143,7 @@ void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
142 fprintf(f, "Bad instruction!\n"); 143 fprintf(f, "Bad instruction!\n");
143} 144}
144 145
145void intel_pt_log(const char *fmt, ...) 146void __intel_pt_log(const char *fmt, ...)
146{ 147{
147 va_list args; 148 va_list args;
148 149
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index db3942f83677..debe751dc3d6 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -25,20 +25,46 @@ void intel_pt_log_enable(void);
25void intel_pt_log_disable(void); 25void intel_pt_log_disable(void);
26void intel_pt_log_set_name(const char *name); 26void intel_pt_log_set_name(const char *name);
27 27
28void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len, 28void __intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
29 uint64_t pos, const unsigned char *buf); 29 uint64_t pos, const unsigned char *buf);
30 30
31struct intel_pt_insn; 31struct intel_pt_insn;
32 32
33void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip); 33void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip);
34void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn, 34void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
35 uint64_t ip); 35 uint64_t ip);
36 36
37__attribute__((format(printf, 1, 2))) 37__attribute__((format(printf, 1, 2)))
38void intel_pt_log(const char *fmt, ...); 38void __intel_pt_log(const char *fmt, ...);
39
40#define intel_pt_log(fmt, ...) \
41 do { \
42 if (intel_pt_enable_logging) \
43 __intel_pt_log(fmt, ##__VA_ARGS__); \
44 } while (0)
45
46#define intel_pt_log_packet(arg, ...) \
47 do { \
48 if (intel_pt_enable_logging) \
49 __intel_pt_log_packet(arg, ##__VA_ARGS__); \
50 } while (0)
51
52#define intel_pt_log_insn(arg, ...) \
53 do { \
54 if (intel_pt_enable_logging) \
55 __intel_pt_log_insn(arg, ##__VA_ARGS__); \
56 } while (0)
57
58#define intel_pt_log_insn_no_data(arg, ...) \
59 do { \
60 if (intel_pt_enable_logging) \
61 __intel_pt_log_insn_no_data(arg, ##__VA_ARGS__); \
62 } while (0)
39 63
40#define x64_fmt "0x%" PRIx64 64#define x64_fmt "0x%" PRIx64
41 65
66extern bool intel_pt_enable_logging;
67
42static inline void intel_pt_log_at(const char *msg, uint64_t u) 68static inline void intel_pt_log_at(const char *msg, uint64_t u)
43{ 69{
44 intel_pt_log("%s at " x64_fmt "\n", msg, u); 70 intel_pt_log("%s at " x64_fmt "\n", msg, u);
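
The intel-pt-log change converts the logging entry points into macros that test the now-global intel_pt_enable_logging flag before calling the renamed __intel_pt_log*() functions, so disabled logging costs a single branch at each call site instead of a varargs call. The same pattern in a self-contained demo (the demo_* names are placeholders, not the perf symbols):

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

static bool demo_logging_enabled;

__attribute__((format(printf, 1, 2)))
static void __demo_log(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

/* The flag test lives in the macro, so a disabled log never enters the
 * varargs function -- the same shape as the intel_pt_log*() wrappers. */
#define demo_log(fmt, ...) \
	do { \
		if (demo_logging_enabled) \
			__demo_log(fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	demo_log("dropped: logging is off\n");
	demo_logging_enabled = true;
	demo_log("printed: value=%d\n", 42);
	return 0;
}
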
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
index 816488c0b97e..d388de72eaca 100644
--- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -353,8 +353,12 @@ AVXcode: 1
35317: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) 35317: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
35418: Grp16 (1A) 35418: Grp16 (1A)
35519: 35519:
3561a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv 356# Intel SDM opcode map does not list MPX instructions. For now using Gv for
3571b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv 357# bnd registers and Ev for everything else is OK because the instruction
358# decoder does not use the information except as an indication that there is
359# a ModR/M byte.
3601a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
3611b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
3581c: 3621c:
3591d: 3631d:
3601e: 3641e:
@@ -732,6 +736,12 @@ bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
732be: vfnmsub231ps/d Vx,Hx,Wx (66),(v) 736be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
733bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1) 737bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
734# 0x0f 0x38 0xc0-0xff 738# 0x0f 0x38 0xc0-0xff
739c8: sha1nexte Vdq,Wdq
740c9: sha1msg1 Vdq,Wdq
741ca: sha1msg2 Vdq,Wdq
742cb: sha256rnds2 Vdq,Wdq
743cc: sha256msg1 Vdq,Wdq
744cd: sha256msg2 Vdq,Wdq
735db: VAESIMC Vdq,Wdq (66),(v1) 745db: VAESIMC Vdq,Wdq (66),(v1)
736dc: VAESENC Vdq,Hdq,Wdq (66),(v1) 746dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
737dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) 747dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
@@ -790,6 +800,7 @@ AVXcode: 3
79061: vpcmpestri Vdq,Wdq,Ib (66),(v1) 80061: vpcmpestri Vdq,Wdq,Ib (66),(v1)
79162: vpcmpistrm Vdq,Wdq,Ib (66),(v1) 80162: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
79263: vpcmpistri Vdq,Wdq,Ib (66),(v1) 80263: vpcmpistri Vdq,Wdq,Ib (66),(v1)
803cc: sha1rnds4 Vdq,Wdq,Ib
793df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1) 804df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
794f0: RORX Gy,Ey,Ib (F2),(v) 805f0: RORX Gy,Ey,Ib (F2),(v)
795EndTable 806EndTable
@@ -874,7 +885,7 @@ GrpTable: Grp7
8742: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) 8852: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
8753: LIDT Ms 8863: LIDT Ms
8764: SMSW Mw/Rv 8874: SMSW Mw/Rv
8775: 8885: rdpkru (110),(11B) | wrpkru (111),(11B)
8786: LMSW Ew 8896: LMSW Ew
8797: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B) 8907: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
880EndTable 891EndTable
@@ -888,6 +899,9 @@ EndTable
888 899
889GrpTable: Grp9 900GrpTable: Grp9
8901: CMPXCHG8B/16B Mq/Mdq 9011: CMPXCHG8B/16B Mq/Mdq
9023: xrstors
9034: xsavec
9045: xsaves
8916: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) 9056: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
8927: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B) 9067: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
893EndTable 907EndTable
@@ -932,8 +946,8 @@ GrpTable: Grp15
9323: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) 9463: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
9334: XSAVE 9474: XSAVE
9345: XRSTOR | lfence (11B) 9485: XRSTOR | lfence (11B)
9356: XSAVEOPT | mfence (11B) 9496: XSAVEOPT | clwb (66) | mfence (11B)
9367: clflush | sfence (11B) 9507: clflush | clflushopt (66) | sfence (11B) | pcommit (66),(11B)
937EndTable 951EndTable
938 952
939GrpTable: Grp16 953GrpTable: Grp16
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 535d86f8e4d1..97f963a3dcb9 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -22,6 +22,7 @@
22#include "../perf.h" 22#include "../perf.h"
23#include "session.h" 23#include "session.h"
24#include "machine.h" 24#include "machine.h"
25#include "sort.h"
25#include "tool.h" 26#include "tool.h"
26#include "event.h" 27#include "event.h"
27#include "evlist.h" 28#include "evlist.h"
@@ -63,6 +64,7 @@ struct intel_pt {
63 bool data_queued; 64 bool data_queued;
64 bool est_tsc; 65 bool est_tsc;
65 bool sync_switch; 66 bool sync_switch;
67 bool mispred_all;
66 int have_sched_switch; 68 int have_sched_switch;
67 u32 pmu_type; 69 u32 pmu_type;
68 u64 kernel_start; 70 u64 kernel_start;
@@ -115,6 +117,9 @@ struct intel_pt_queue {
115 void *decoder; 117 void *decoder;
116 const struct intel_pt_state *state; 118 const struct intel_pt_state *state;
117 struct ip_callchain *chain; 119 struct ip_callchain *chain;
120 struct branch_stack *last_branch;
121 struct branch_stack *last_branch_rb;
122 size_t last_branch_pos;
118 union perf_event *event_buf; 123 union perf_event *event_buf;
119 bool on_heap; 124 bool on_heap;
120 bool stop; 125 bool stop;
@@ -675,6 +680,19 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
675 goto out_free; 680 goto out_free;
676 } 681 }
677 682
683 if (pt->synth_opts.last_branch) {
684 size_t sz = sizeof(struct branch_stack);
685
686 sz += pt->synth_opts.last_branch_sz *
687 sizeof(struct branch_entry);
688 ptq->last_branch = zalloc(sz);
689 if (!ptq->last_branch)
690 goto out_free;
691 ptq->last_branch_rb = zalloc(sz);
692 if (!ptq->last_branch_rb)
693 goto out_free;
694 }
695
678 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); 696 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
679 if (!ptq->event_buf) 697 if (!ptq->event_buf)
680 goto out_free; 698 goto out_free;
@@ -720,7 +738,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
720 738
721 if (!params.period) { 739 if (!params.period) {
722 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS; 740 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
723 params.period = 1000; 741 params.period = 1;
724 } 742 }
725 } 743 }
726 744
@@ -732,6 +750,8 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
732 750
733out_free: 751out_free:
734 zfree(&ptq->event_buf); 752 zfree(&ptq->event_buf);
753 zfree(&ptq->last_branch);
754 zfree(&ptq->last_branch_rb);
735 zfree(&ptq->chain); 755 zfree(&ptq->chain);
736 free(ptq); 756 free(ptq);
737 return NULL; 757 return NULL;
@@ -746,6 +766,8 @@ static void intel_pt_free_queue(void *priv)
746 thread__zput(ptq->thread); 766 thread__zput(ptq->thread);
747 intel_pt_decoder_free(ptq->decoder); 767 intel_pt_decoder_free(ptq->decoder);
748 zfree(&ptq->event_buf); 768 zfree(&ptq->event_buf);
769 zfree(&ptq->last_branch);
770 zfree(&ptq->last_branch_rb);
749 zfree(&ptq->chain); 771 zfree(&ptq->chain);
750 free(ptq); 772 free(ptq);
751} 773}
@@ -876,6 +898,58 @@ static int intel_pt_setup_queues(struct intel_pt *pt)
876 return 0; 898 return 0;
877} 899}
878 900
901static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
902{
903 struct branch_stack *bs_src = ptq->last_branch_rb;
904 struct branch_stack *bs_dst = ptq->last_branch;
905 size_t nr = 0;
906
907 bs_dst->nr = bs_src->nr;
908
909 if (!bs_src->nr)
910 return;
911
912 nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
913 memcpy(&bs_dst->entries[0],
914 &bs_src->entries[ptq->last_branch_pos],
915 sizeof(struct branch_entry) * nr);
916
917 if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
918 memcpy(&bs_dst->entries[nr],
919 &bs_src->entries[0],
920 sizeof(struct branch_entry) * ptq->last_branch_pos);
921 }
922}
923
924static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
925{
926 ptq->last_branch_pos = 0;
927 ptq->last_branch_rb->nr = 0;
928}
929
930static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
931{
932 const struct intel_pt_state *state = ptq->state;
933 struct branch_stack *bs = ptq->last_branch_rb;
934 struct branch_entry *be;
935
936 if (!ptq->last_branch_pos)
937 ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;
938
939 ptq->last_branch_pos -= 1;
940
941 be = &bs->entries[ptq->last_branch_pos];
942 be->from = state->from_ip;
943 be->to = state->to_ip;
944 be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
945 be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
946 /* No support for mispredict */
947 be->flags.mispred = ptq->pt->mispred_all;
948
949 if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
950 bs->nr += 1;
951}
952
879static int intel_pt_inject_event(union perf_event *event, 953static int intel_pt_inject_event(union perf_event *event,
880 struct perf_sample *sample, u64 type, 954 struct perf_sample *sample, u64 type,
881 bool swapped) 955 bool swapped)
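
intel_pt_copy_last_branch_rb() and intel_pt_update_last_branch_rb(), added above, keep the most recent branches in a small ring buffer that is written newest-first from last_branch_pos and, when a sample is synthesized, unrolled into a linear branch_stack with at most two memcpy() calls. The standalone sketch below exercises the same bookkeeping with simplified types (int entries instead of struct branch_entry):

#include <stdio.h>
#include <string.h>

#define SZ 4	/* stands in for synth_opts.last_branch_sz */

struct rb { unsigned int nr; size_t pos; int entries[SZ]; };

/* Newest entry is written just below 'pos', wrapping to the top, like
 * intel_pt_update_last_branch_rb(). */
static void rb_add(struct rb *b, int val)
{
	if (!b->pos)
		b->pos = SZ;
	b->pos--;
	b->entries[b->pos] = val;
	if (b->nr < SZ)
		b->nr++;
}

/* Linearize newest-first into dst (dst must hold at least src->nr items),
 * mirroring the two-memcpy() unroll in intel_pt_copy_last_branch_rb(). */
static void rb_copy(const struct rb *src, int *dst)
{
	size_t head = SZ - src->pos;

	if (!src->nr)
		return;
	memcpy(dst, &src->entries[src->pos], head * sizeof(int));
	if (src->nr >= SZ)	/* wrapped: append the older tail */
		memcpy(dst + head, &src->entries[0], src->pos * sizeof(int));
}

int main(void)
{
	struct rb b = { 0, 0, { 0 } };
	int out[SZ] = { 0 };
	unsigned int i;

	for (i = 1; i <= 6; i++)	/* 6 branches into a 4-deep buffer */
		rb_add(&b, i);
	rb_copy(&b, out);
	for (i = 0; i < b.nr; i++)
		printf("%d ", out[i]);	/* newest first: 6 5 4 3 */
	printf("\n");
	return 0;
}
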
@@ -890,6 +964,13 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
890 struct intel_pt *pt = ptq->pt; 964 struct intel_pt *pt = ptq->pt;
891 union perf_event *event = ptq->event_buf; 965 union perf_event *event = ptq->event_buf;
892 struct perf_sample sample = { .ip = 0, }; 966 struct perf_sample sample = { .ip = 0, };
967 struct dummy_branch_stack {
968 u64 nr;
969 struct branch_entry entries;
970 } dummy_bs;
971
972 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
973 return 0;
893 974
894 event->sample.header.type = PERF_RECORD_SAMPLE; 975 event->sample.header.type = PERF_RECORD_SAMPLE;
895 event->sample.header.misc = PERF_RECORD_MISC_USER; 976 event->sample.header.misc = PERF_RECORD_MISC_USER;
@@ -909,8 +990,20 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
909 sample.flags = ptq->flags; 990 sample.flags = ptq->flags;
910 sample.insn_len = ptq->insn_len; 991 sample.insn_len = ptq->insn_len;
911 992
912 if (pt->branches_filter && !(pt->branches_filter & ptq->flags)) 993 /*
913 return 0; 994 * perf report cannot handle events without a branch stack when using
995 * SORT_MODE__BRANCH so make a dummy one.
996 */
997 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
998 dummy_bs = (struct dummy_branch_stack){
999 .nr = 1,
1000 .entries = {
1001 .from = sample.ip,
1002 .to = sample.addr,
1003 },
1004 };
1005 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1006 }
914 1007
915 if (pt->synth_opts.inject) { 1008 if (pt->synth_opts.inject) {
916 ret = intel_pt_inject_event(event, &sample, 1009 ret = intel_pt_inject_event(event, &sample,
@@ -961,6 +1054,11 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
961 sample.callchain = ptq->chain; 1054 sample.callchain = ptq->chain;
962 } 1055 }
963 1056
1057 if (pt->synth_opts.last_branch) {
1058 intel_pt_copy_last_branch_rb(ptq);
1059 sample.branch_stack = ptq->last_branch;
1060 }
1061
964 if (pt->synth_opts.inject) { 1062 if (pt->synth_opts.inject) {
965 ret = intel_pt_inject_event(event, &sample, 1063 ret = intel_pt_inject_event(event, &sample,
966 pt->instructions_sample_type, 1064 pt->instructions_sample_type,
@@ -974,6 +1072,9 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
974 pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n", 1072 pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
975 ret); 1073 ret);
976 1074
1075 if (pt->synth_opts.last_branch)
1076 intel_pt_reset_last_branch_rb(ptq);
1077
977 return ret; 1078 return ret;
978} 1079}
979 1080
@@ -1008,6 +1109,11 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1008 sample.callchain = ptq->chain; 1109 sample.callchain = ptq->chain;
1009 } 1110 }
1010 1111
1112 if (pt->synth_opts.last_branch) {
1113 intel_pt_copy_last_branch_rb(ptq);
1114 sample.branch_stack = ptq->last_branch;
1115 }
1116
1011 if (pt->synth_opts.inject) { 1117 if (pt->synth_opts.inject) {
1012 ret = intel_pt_inject_event(event, &sample, 1118 ret = intel_pt_inject_event(event, &sample,
1013 pt->transactions_sample_type, 1119 pt->transactions_sample_type,
@@ -1021,6 +1127,9 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1021 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n", 1127 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
1022 ret); 1128 ret);
1023 1129
1130 if (pt->synth_opts.callchain)
1131 intel_pt_reset_last_branch_rb(ptq);
1132
1024 return ret; 1133 return ret;
1025} 1134}
1026 1135
@@ -1116,6 +1225,9 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
1116 return err; 1225 return err;
1117 } 1226 }
1118 1227
1228 if (pt->synth_opts.last_branch)
1229 intel_pt_update_last_branch_rb(ptq);
1230
1119 if (!pt->sync_switch) 1231 if (!pt->sync_switch)
1120 return 0; 1232 return 0;
1121 1233
@@ -1145,16 +1257,18 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
1145 return 0; 1257 return 0;
1146} 1258}
1147 1259
1148static u64 intel_pt_switch_ip(struct machine *machine, u64 *ptss_ip) 1260static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
1149{ 1261{
1262 struct machine *machine = pt->machine;
1150 struct map *map; 1263 struct map *map;
1151 struct symbol *sym, *start; 1264 struct symbol *sym, *start;
1152 u64 ip, switch_ip = 0; 1265 u64 ip, switch_ip = 0;
1266 const char *ptss;
1153 1267
1154 if (ptss_ip) 1268 if (ptss_ip)
1155 *ptss_ip = 0; 1269 *ptss_ip = 0;
1156 1270
1157 map = machine__kernel_map(machine, MAP__FUNCTION); 1271 map = machine__kernel_map(machine);
1158 if (!map) 1272 if (!map)
1159 return 0; 1273 return 0;
1160 1274
@@ -1177,8 +1291,13 @@ static u64 intel_pt_switch_ip(struct machine *machine, u64 *ptss_ip)
1177 if (!switch_ip || !ptss_ip) 1291 if (!switch_ip || !ptss_ip)
1178 return 0; 1292 return 0;
1179 1293
1294 if (pt->have_sched_switch == 1)
1295 ptss = "perf_trace_sched_switch";
1296 else
1297 ptss = "__perf_event_task_sched_out";
1298
1180 for (sym = start; sym; sym = dso__next_symbol(sym)) { 1299 for (sym = start; sym; sym = dso__next_symbol(sym)) {
1181 if (!strcmp(sym->name, "perf_trace_sched_switch")) { 1300 if (!strcmp(sym->name, ptss)) {
1182 ip = map->unmap_ip(map, sym->start); 1301 ip = map->unmap_ip(map, sym->start);
1183 if (ip >= map->start && ip < map->end) { 1302 if (ip >= map->start && ip < map->end) {
1184 *ptss_ip = ip; 1303 *ptss_ip = ip;
@@ -1198,11 +1317,11 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
1198 1317
1199 if (!pt->kernel_start) { 1318 if (!pt->kernel_start) {
1200 pt->kernel_start = machine__kernel_start(pt->machine); 1319 pt->kernel_start = machine__kernel_start(pt->machine);
1201 if (pt->per_cpu_mmaps && pt->have_sched_switch && 1320 if (pt->per_cpu_mmaps &&
1321 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
1202 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) && 1322 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
1203 !pt->sampling_mode) { 1323 !pt->sampling_mode) {
1204 pt->switch_ip = intel_pt_switch_ip(pt->machine, 1324 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
1205 &pt->ptss_ip);
1206 if (pt->switch_ip) { 1325 if (pt->switch_ip) {
1207 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", 1326 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
1208 pt->switch_ip, pt->ptss_ip); 1327 pt->switch_ip, pt->ptss_ip);
@@ -1387,31 +1506,18 @@ static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
1387 return NULL; 1506 return NULL;
1388} 1507}
1389 1508
1390static int intel_pt_process_switch(struct intel_pt *pt, 1509static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
1391 struct perf_sample *sample) 1510 u64 timestamp)
1392{ 1511{
1393 struct intel_pt_queue *ptq; 1512 struct intel_pt_queue *ptq;
1394 struct perf_evsel *evsel; 1513 int err;
1395 pid_t tid;
1396 int cpu, err;
1397
1398 evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
1399 if (evsel != pt->switch_evsel)
1400 return 0;
1401
1402 tid = perf_evsel__intval(evsel, sample, "next_pid");
1403 cpu = sample->cpu;
1404
1405 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
1406 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
1407 &pt->tc));
1408 1514
1409 if (!pt->sync_switch) 1515 if (!pt->sync_switch)
1410 goto out; 1516 return 1;
1411 1517
1412 ptq = intel_pt_cpu_to_ptq(pt, cpu); 1518 ptq = intel_pt_cpu_to_ptq(pt, cpu);
1413 if (!ptq) 1519 if (!ptq)
1414 goto out; 1520 return 1;
1415 1521
1416 switch (ptq->switch_state) { 1522 switch (ptq->switch_state) {
1417 case INTEL_PT_SS_NOT_TRACING: 1523 case INTEL_PT_SS_NOT_TRACING:
@@ -1424,7 +1530,7 @@ static int intel_pt_process_switch(struct intel_pt *pt,
1424 return 0; 1530 return 0;
1425 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 1531 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
1426 if (!ptq->on_heap) { 1532 if (!ptq->on_heap) {
1427 ptq->timestamp = perf_time_to_tsc(sample->time, 1533 ptq->timestamp = perf_time_to_tsc(timestamp,
1428 &pt->tc); 1534 &pt->tc);
1429 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr, 1535 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
1430 ptq->timestamp); 1536 ptq->timestamp);
@@ -1441,10 +1547,76 @@ static int intel_pt_process_switch(struct intel_pt *pt,
1441 default: 1547 default:
1442 break; 1548 break;
1443 } 1549 }
1444out: 1550
1551 return 1;
1552}
1553
1554static int intel_pt_process_switch(struct intel_pt *pt,
1555 struct perf_sample *sample)
1556{
1557 struct perf_evsel *evsel;
1558 pid_t tid;
1559 int cpu, ret;
1560
1561 evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
1562 if (evsel != pt->switch_evsel)
1563 return 0;
1564
1565 tid = perf_evsel__intval(evsel, sample, "next_pid");
1566 cpu = sample->cpu;
1567
1568 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
1569 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
1570 &pt->tc));
1571
1572 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
1573 if (ret <= 0)
1574 return ret;
1575
1445 return machine__set_current_tid(pt->machine, cpu, -1, tid); 1576 return machine__set_current_tid(pt->machine, cpu, -1, tid);
1446} 1577}
1447 1578
1579static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
1580 struct perf_sample *sample)
1581{
1582 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1583 pid_t pid, tid;
1584 int cpu, ret;
1585
1586 cpu = sample->cpu;
1587
1588 if (pt->have_sched_switch == 3) {
1589 if (!out)
1590 return 0;
1591 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
1592 pr_err("Expecting CPU-wide context switch event\n");
1593 return -EINVAL;
1594 }
1595 pid = event->context_switch.next_prev_pid;
1596 tid = event->context_switch.next_prev_tid;
1597 } else {
1598 if (out)
1599 return 0;
1600 pid = sample->pid;
1601 tid = sample->tid;
1602 }
1603
1604 if (tid == -1) {
1605 pr_err("context_switch event has no tid\n");
1606 return -EINVAL;
1607 }
1608
1609 intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
1610 cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
1611 &pt->tc));
1612
1613 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
1614 if (ret <= 0)
1615 return ret;
1616
1617 return machine__set_current_tid(pt->machine, cpu, pid, tid);
1618}
1619
1448static int intel_pt_process_itrace_start(struct intel_pt *pt, 1620static int intel_pt_process_itrace_start(struct intel_pt *pt,
1449 union perf_event *event, 1621 union perf_event *event,
1450 struct perf_sample *sample) 1622 struct perf_sample *sample)
@@ -1515,6 +1687,9 @@ static int intel_pt_process_event(struct perf_session *session,
1515 err = intel_pt_process_switch(pt, sample); 1687 err = intel_pt_process_switch(pt, sample);
1516 else if (event->header.type == PERF_RECORD_ITRACE_START) 1688 else if (event->header.type == PERF_RECORD_ITRACE_START)
1517 err = intel_pt_process_itrace_start(pt, event, sample); 1689 err = intel_pt_process_itrace_start(pt, event, sample);
1690 else if (event->header.type == PERF_RECORD_SWITCH ||
1691 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
1692 err = intel_pt_context_switch(pt, event, sample);
1518 1693
1519 intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n", 1694 intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
1520 perf_event__name(event->header.type), event->header.type, 1695 perf_event__name(event->header.type), event->header.type,
@@ -1700,6 +1875,8 @@ static int intel_pt_synth_events(struct intel_pt *pt,
1700 pt->instructions_sample_period = attr.sample_period; 1875 pt->instructions_sample_period = attr.sample_period;
1701 if (pt->synth_opts.callchain) 1876 if (pt->synth_opts.callchain)
1702 attr.sample_type |= PERF_SAMPLE_CALLCHAIN; 1877 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
1878 if (pt->synth_opts.last_branch)
1879 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
1703 pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 1880 pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
1704 id, (u64)attr.sample_type); 1881 id, (u64)attr.sample_type);
1705 err = intel_pt_synth_event(session, &attr, id); 1882 err = intel_pt_synth_event(session, &attr, id);
@@ -1719,6 +1896,8 @@ static int intel_pt_synth_events(struct intel_pt *pt,
1719 attr.sample_period = 1; 1896 attr.sample_period = 1;
1720 if (pt->synth_opts.callchain) 1897 if (pt->synth_opts.callchain)
1721 attr.sample_type |= PERF_SAMPLE_CALLCHAIN; 1898 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
1899 if (pt->synth_opts.last_branch)
1900 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
1722 pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 1901 pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
1723 id, (u64)attr.sample_type); 1902 id, (u64)attr.sample_type);
1724 err = intel_pt_synth_event(session, &attr, id); 1903 err = intel_pt_synth_event(session, &attr, id);
@@ -1745,6 +1924,7 @@ static int intel_pt_synth_events(struct intel_pt *pt,
1745 attr.sample_period = 1; 1924 attr.sample_period = 1;
1746 attr.sample_type |= PERF_SAMPLE_ADDR; 1925 attr.sample_type |= PERF_SAMPLE_ADDR;
1747 attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN; 1926 attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
1927 attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
1748 pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 1928 pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
1749 id, (u64)attr.sample_type); 1929 id, (u64)attr.sample_type);
1750 err = intel_pt_synth_event(session, &attr, id); 1930 err = intel_pt_synth_event(session, &attr, id);
@@ -1777,6 +1957,28 @@ static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
1777 return NULL; 1957 return NULL;
1778} 1958}
1779 1959
1960static bool intel_pt_find_switch(struct perf_evlist *evlist)
1961{
1962 struct perf_evsel *evsel;
1963
1964 evlist__for_each(evlist, evsel) {
1965 if (evsel->attr.context_switch)
1966 return true;
1967 }
1968
1969 return false;
1970}
1971
1972static int intel_pt_perf_config(const char *var, const char *value, void *data)
1973{
1974 struct intel_pt *pt = data;
1975
1976 if (!strcmp(var, "intel-pt.mispred-all"))
1977 pt->mispred_all = perf_config_bool(var, value);
1978
1979 return 0;
1980}
1981
1780static const char * const intel_pt_info_fmts[] = { 1982static const char * const intel_pt_info_fmts[] = {
1781 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n", 1983 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
1782 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n", 1984 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
@@ -1821,6 +2023,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
1821 if (!pt) 2023 if (!pt)
1822 return -ENOMEM; 2024 return -ENOMEM;
1823 2025
2026 perf_config(intel_pt_perf_config, pt);
2027
1824 err = auxtrace_queues__init(&pt->queues); 2028 err = auxtrace_queues__init(&pt->queues);
1825 if (err) 2029 if (err)
1826 goto err_free; 2030 goto err_free;
@@ -1888,6 +2092,10 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
1888 pr_err("%s: missing sched_switch event\n", __func__); 2092 pr_err("%s: missing sched_switch event\n", __func__);
1889 goto err_delete_thread; 2093 goto err_delete_thread;
1890 } 2094 }
2095 } else if (pt->have_sched_switch == 2 &&
2096 !intel_pt_find_switch(session->evlist)) {
2097 pr_err("%s: missing context_switch attribute flag\n", __func__);
2098 goto err_delete_thread;
1891 } 2099 }
1892 2100
1893 if (session->itrace_synth_opts && session->itrace_synth_opts->set) { 2101 if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
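
The intel-pt changes above keep the most recent branches in a small per-queue ring buffer (last_branch_rb) that is filled backwards and flattened, newest entry first, into the branch stack attached to synthesized samples. The following is a minimal, self-contained sketch of that ring-buffer technique only; the names and the tiny fixed size are made up for illustration and are not the perf code itself.

/*
 * Illustration of the "last branch" ring buffer idea used above:
 * entries are written backwards so that copying [pos, size) followed
 * by [0, pos) yields the branches in newest-first order.
 */
#include <stdio.h>
#include <string.h>

#define LBR_SIZE 4

struct lbr_entry { unsigned long from, to; };

struct lbr_rb {
	struct lbr_entry entries[LBR_SIZE];
	size_t nr;	/* number of valid entries, capped at LBR_SIZE */
	size_t pos;	/* index of the newest entry */
};

static void lbr_add(struct lbr_rb *rb, unsigned long from, unsigned long to)
{
	if (!rb->pos)
		rb->pos = LBR_SIZE;
	rb->pos--;
	rb->entries[rb->pos] = (struct lbr_entry){ .from = from, .to = to };
	if (rb->nr < LBR_SIZE)
		rb->nr++;
}

/* Flatten the ring buffer into dst[], newest branch first. */
static size_t lbr_copy(const struct lbr_rb *rb, struct lbr_entry *dst)
{
	size_t head = LBR_SIZE - rb->pos;

	if (!rb->nr)
		return 0;
	memcpy(dst, &rb->entries[rb->pos], head * sizeof(*dst));
	if (rb->nr == LBR_SIZE)
		memcpy(dst + head, rb->entries, rb->pos * sizeof(*dst));
	return rb->nr;
}

int main(void)
{
	struct lbr_rb rb = { .nr = 0, .pos = 0 };
	struct lbr_entry out[LBR_SIZE];
	size_t i, n;

	for (i = 1; i <= 6; i++)
		lbr_add(&rb, 0x1000 * i, 0x1000 * i + 4);

	n = lbr_copy(&rb, out);
	for (i = 0; i < n; i++)	/* prints the newest branch first */
		printf("%#lx -> %#lx\n", out[i].from, out[i].to);
	return 0;
}

Once the buffer has wrapped, only the oldest entry is overwritten by each new branch, which is why the copy-out needs the two-part memcpy seen in intel_pt_copy_last_branch_rb() above.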
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6309f7ceb08f..5ef90be2a249 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -35,6 +35,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
35 machine->last_match = NULL; 35 machine->last_match = NULL;
36 36
37 machine->vdso_info = NULL; 37 machine->vdso_info = NULL;
38 machine->env = NULL;
38 39
39 machine->pid = pid; 40 machine->pid = pid;
40 41
@@ -624,7 +625,7 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
624{ 625{
625 int i; 626 int i;
626 size_t printed = 0; 627 size_t printed = 0;
627 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; 628 struct dso *kdso = machine__kernel_map(machine)->dso;
628 629
629 if (kdso->has_build_id) { 630 if (kdso->has_build_id) {
630 char filename[PATH_MAX]; 631 char filename[PATH_MAX];
@@ -740,6 +741,7 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
740 741
741 for (type = 0; type < MAP__NR_TYPES; ++type) { 742 for (type = 0; type < MAP__NR_TYPES; ++type) {
742 struct kmap *kmap; 743 struct kmap *kmap;
744 struct map *map;
743 745
744 machine->vmlinux_maps[type] = map__new2(start, kernel, type); 746 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
745 if (machine->vmlinux_maps[type] == NULL) 747 if (machine->vmlinux_maps[type] == NULL)
@@ -748,13 +750,13 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
748 machine->vmlinux_maps[type]->map_ip = 750 machine->vmlinux_maps[type]->map_ip =
749 machine->vmlinux_maps[type]->unmap_ip = 751 machine->vmlinux_maps[type]->unmap_ip =
750 identity__map_ip; 752 identity__map_ip;
751 kmap = map__kmap(machine->vmlinux_maps[type]); 753 map = __machine__kernel_map(machine, type);
754 kmap = map__kmap(map);
752 if (!kmap) 755 if (!kmap)
753 return -1; 756 return -1;
754 757
755 kmap->kmaps = &machine->kmaps; 758 kmap->kmaps = &machine->kmaps;
756 map_groups__insert(&machine->kmaps, 759 map_groups__insert(&machine->kmaps, map);
757 machine->vmlinux_maps[type]);
758 } 760 }
759 761
760 return 0; 762 return 0;
@@ -766,13 +768,13 @@ void machine__destroy_kernel_maps(struct machine *machine)
766 768
767 for (type = 0; type < MAP__NR_TYPES; ++type) { 769 for (type = 0; type < MAP__NR_TYPES; ++type) {
768 struct kmap *kmap; 770 struct kmap *kmap;
771 struct map *map = __machine__kernel_map(machine, type);
769 772
770 if (machine->vmlinux_maps[type] == NULL) 773 if (map == NULL)
771 continue; 774 continue;
772 775
773 kmap = map__kmap(machine->vmlinux_maps[type]); 776 kmap = map__kmap(map);
774 map_groups__remove(&machine->kmaps, 777 map_groups__remove(&machine->kmaps, map);
775 machine->vmlinux_maps[type]);
776 if (kmap && kmap->ref_reloc_sym) { 778 if (kmap && kmap->ref_reloc_sym) {
777 /* 779 /*
778 * ref_reloc_sym is shared among all maps, so free just 780 * ref_reloc_sym is shared among all maps, so free just
@@ -866,7 +868,7 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
866int machine__load_kallsyms(struct machine *machine, const char *filename, 868int machine__load_kallsyms(struct machine *machine, const char *filename,
867 enum map_type type, symbol_filter_t filter) 869 enum map_type type, symbol_filter_t filter)
868{ 870{
869 struct map *map = machine->vmlinux_maps[type]; 871 struct map *map = machine__kernel_map(machine);
870 int ret = dso__load_kallsyms(map->dso, filename, map, filter); 872 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
871 873
872 if (ret > 0) { 874 if (ret > 0) {
@@ -885,7 +887,7 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
885int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 887int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
886 symbol_filter_t filter) 888 symbol_filter_t filter)
887{ 889{
888 struct map *map = machine->vmlinux_maps[type]; 890 struct map *map = machine__kernel_map(machine);
889 int ret = dso__load_vmlinux_path(map->dso, map, filter); 891 int ret = dso__load_vmlinux_path(map->dso, map, filter);
890 892
891 if (ret > 0) 893 if (ret > 0)
@@ -1243,8 +1245,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1243 /* 1245 /*
1244 * preload dso of guest kernel and modules 1246 * preload dso of guest kernel and modules
1245 */ 1247 */
1246 dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION], 1248 dso__load(kernel, machine__kernel_map(machine), NULL);
1247 NULL);
1248 } 1249 }
1249 } 1250 }
1250 return 0; 1251 return 0;
@@ -1830,7 +1831,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
1830 } 1831 }
1831 1832
1832check_calls: 1833check_calls:
1833 if (chain->nr > PERF_MAX_STACK_DEPTH) { 1834 if (chain->nr > PERF_MAX_STACK_DEPTH && (int)chain->nr > max_stack) {
1834 pr_warning("corrupted callchain. skipping...\n"); 1835 pr_warning("corrupted callchain. skipping...\n");
1835 return 0; 1836 return 0;
1836 } 1837 }
@@ -1996,7 +1997,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
1996 1997
1997int machine__get_kernel_start(struct machine *machine) 1998int machine__get_kernel_start(struct machine *machine)
1998{ 1999{
1999 struct map *map = machine__kernel_map(machine, MAP__FUNCTION); 2000 struct map *map = machine__kernel_map(machine);
2000 int err = 0; 2001 int err = 0;
2001 2002
2002 /* 2003 /*
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index ea5cb4a621db..2c2b443df5ba 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -34,6 +34,7 @@ struct machine {
34 struct list_head dead_threads; 34 struct list_head dead_threads;
35 struct thread *last_match; 35 struct thread *last_match;
36 struct vdso_info *vdso_info; 36 struct vdso_info *vdso_info;
37 struct perf_env *env;
37 struct dsos dsos; 38 struct dsos dsos;
38 struct map_groups kmaps; 39 struct map_groups kmaps;
39 struct map *vmlinux_maps[MAP__NR_TYPES]; 40 struct map *vmlinux_maps[MAP__NR_TYPES];
@@ -47,11 +48,17 @@ struct machine {
47}; 48};
48 49
49static inline 50static inline
50struct map *machine__kernel_map(struct machine *machine, enum map_type type) 51struct map *__machine__kernel_map(struct machine *machine, enum map_type type)
51{ 52{
52 return machine->vmlinux_maps[type]; 53 return machine->vmlinux_maps[type];
53} 54}
54 55
56static inline
57struct map *machine__kernel_map(struct machine *machine)
58{
59 return __machine__kernel_map(machine, MAP__FUNCTION);
60}
61
55int machine__get_kernel_start(struct machine *machine); 62int machine__get_kernel_start(struct machine *machine);
56 63
57static inline u64 machine__kernel_start(struct machine *machine) 64static inline u64 machine__kernel_start(struct machine *machine)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index b1c475d9b240..4e38c396a897 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -235,7 +235,7 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
235 */ 235 */
236bool __map__is_kernel(const struct map *map) 236bool __map__is_kernel(const struct map *map)
237{ 237{
238 return map->groups->machine->vmlinux_maps[map->type] == map; 238 return __machine__kernel_map(map->groups->machine, map->type) == map;
239} 239}
240 240
241static void map__exit(struct map *map) 241static void map__exit(struct map *map)
@@ -553,13 +553,9 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
553 return NULL; 553 return NULL;
554} 554}
555 555
556struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, 556struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
557 enum map_type type, 557 struct map **mapp, symbol_filter_t filter)
558 const char *name,
559 struct map **mapp,
560 symbol_filter_t filter)
561{ 558{
562 struct maps *maps = &mg->maps[type];
563 struct symbol *sym; 559 struct symbol *sym;
564 struct rb_node *nd; 560 struct rb_node *nd;
565 561
@@ -583,6 +579,17 @@ out:
583 return sym; 579 return sym;
584} 580}
585 581
582struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
583 enum map_type type,
584 const char *name,
585 struct map **mapp,
586 symbol_filter_t filter)
587{
588 struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);
589
590 return sym;
591}
592
586int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter) 593int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
587{ 594{
588 if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { 595 if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 57829e89b78b..7309d64ce39e 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -190,6 +190,8 @@ void maps__remove(struct maps *maps, struct map *map);
190struct map *maps__find(struct maps *maps, u64 addr); 190struct map *maps__find(struct maps *maps, u64 addr);
191struct map *maps__first(struct maps *maps); 191struct map *maps__first(struct maps *maps);
192struct map *map__next(struct map *map); 192struct map *map__next(struct map *map);
193struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
194 struct map **mapp, symbol_filter_t filter);
193void map_groups__init(struct map_groups *mg, struct machine *machine); 195void map_groups__init(struct map_groups *mg, struct machine *machine);
194void map_groups__exit(struct map_groups *mg); 196void map_groups__exit(struct map_groups *mg);
195int map_groups__clone(struct map_groups *mg, 197int map_groups__clone(struct map_groups *mg,
diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c
index a3b1e13a05c0..355eecf6bf59 100644
--- a/tools/perf/util/parse-branch-options.c
+++ b/tools/perf/util/parse-branch-options.c
@@ -27,6 +27,7 @@ static const struct branch_mode branch_modes[] = {
27 BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX), 27 BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
28 BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND), 28 BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
29 BRANCH_OPT("ind_jmp", PERF_SAMPLE_BRANCH_IND_JUMP), 29 BRANCH_OPT("ind_jmp", PERF_SAMPLE_BRANCH_IND_JUMP),
30 BRANCH_OPT("call", PERF_SAMPLE_BRANCH_CALL),
30 BRANCH_END 31 BRANCH_END
31}; 32};
32 33
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 21ed6ee63da9..bee60583839a 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1,4 +1,5 @@
1#include <linux/hw_breakpoint.h> 1#include <linux/hw_breakpoint.h>
2#include <linux/err.h>
2#include "util.h" 3#include "util.h"
3#include "../perf.h" 4#include "../perf.h"
4#include "evlist.h" 5#include "evlist.h"
@@ -10,8 +11,9 @@
10#include "symbol.h" 11#include "symbol.h"
11#include "cache.h" 12#include "cache.h"
12#include "header.h" 13#include "header.h"
14#include "bpf-loader.h"
13#include "debug.h" 15#include "debug.h"
14#include <api/fs/debugfs.h> 16#include <api/fs/tracing_path.h>
15#include "parse-events-bison.h" 17#include "parse-events-bison.h"
16#define YY_EXTRA_TYPE int 18#define YY_EXTRA_TYPE int
17#include "parse-events-flex.h" 19#include "parse-events-flex.h"
@@ -26,6 +28,8 @@
26extern int parse_events_debug; 28extern int parse_events_debug;
27#endif 29#endif
28int parse_events_parse(void *data, void *scanner); 30int parse_events_parse(void *data, void *scanner);
31static int get_config_terms(struct list_head *head_config,
32 struct list_head *head_terms __maybe_unused);
29 33
30static struct perf_pmu_event_symbol *perf_pmu_events_list; 34static struct perf_pmu_event_symbol *perf_pmu_events_list;
31/* 35/*
@@ -386,32 +390,72 @@ int parse_events_add_cache(struct list_head *list, int *idx,
386 return add_event(list, idx, &attr, name, NULL); 390 return add_event(list, idx, &attr, name, NULL);
387} 391}
388 392
393static void tracepoint_error(struct parse_events_error *e, int err,
394 char *sys, char *name)
395{
396 char help[BUFSIZ];
397
398 /*
399 * We get error directly from syscall errno ( > 0),
400 * or from encoded pointer's error ( < 0).
401 */
402 err = abs(err);
403
404 switch (err) {
405 case EACCES:
406 e->str = strdup("can't access trace events");
407 break;
408 case ENOENT:
409 e->str = strdup("unknown tracepoint");
410 break;
411 default:
412 e->str = strdup("failed to add tracepoint");
413 break;
414 }
415
416 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
417 e->help = strdup(help);
418}
419
389static int add_tracepoint(struct list_head *list, int *idx, 420static int add_tracepoint(struct list_head *list, int *idx,
390 char *sys_name, char *evt_name) 421 char *sys_name, char *evt_name,
422 struct parse_events_error *err,
423 struct list_head *head_config)
391{ 424{
392 struct perf_evsel *evsel; 425 struct perf_evsel *evsel;
393 426
394 evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++); 427 evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
395 if (!evsel) 428 if (IS_ERR(evsel)) {
396 return -ENOMEM; 429 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
430 return PTR_ERR(evsel);
431 }
397 432
398 list_add_tail(&evsel->node, list); 433 if (head_config) {
434 LIST_HEAD(config_terms);
399 435
436 if (get_config_terms(head_config, &config_terms))
437 return -ENOMEM;
438 list_splice(&config_terms, &evsel->config_terms);
439 }
440
441 list_add_tail(&evsel->node, list);
400 return 0; 442 return 0;
401} 443}
402 444
403static int add_tracepoint_multi_event(struct list_head *list, int *idx, 445static int add_tracepoint_multi_event(struct list_head *list, int *idx,
404 char *sys_name, char *evt_name) 446 char *sys_name, char *evt_name,
447 struct parse_events_error *err,
448 struct list_head *head_config)
405{ 449{
406 char evt_path[MAXPATHLEN]; 450 char evt_path[MAXPATHLEN];
407 struct dirent *evt_ent; 451 struct dirent *evt_ent;
408 DIR *evt_dir; 452 DIR *evt_dir;
409 int ret = 0; 453 int ret = 0, found = 0;
410 454
411 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); 455 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
412 evt_dir = opendir(evt_path); 456 evt_dir = opendir(evt_path);
413 if (!evt_dir) { 457 if (!evt_dir) {
414 perror("Can't open event dir"); 458 tracepoint_error(err, errno, sys_name, evt_name);
415 return -1; 459 return -1;
416 } 460 }
417 461
@@ -425,7 +469,15 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
425 if (!strglobmatch(evt_ent->d_name, evt_name)) 469 if (!strglobmatch(evt_ent->d_name, evt_name))
426 continue; 470 continue;
427 471
428 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); 472 found++;
473
474 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
475 err, head_config);
476 }
477
478 if (!found) {
479 tracepoint_error(err, ENOENT, sys_name, evt_name);
480 ret = -1;
429 } 481 }
430 482
431 closedir(evt_dir); 483 closedir(evt_dir);
@@ -433,15 +485,21 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
433} 485}
434 486
435static int add_tracepoint_event(struct list_head *list, int *idx, 487static int add_tracepoint_event(struct list_head *list, int *idx,
436 char *sys_name, char *evt_name) 488 char *sys_name, char *evt_name,
489 struct parse_events_error *err,
490 struct list_head *head_config)
437{ 491{
438 return strpbrk(evt_name, "*?") ? 492 return strpbrk(evt_name, "*?") ?
439 add_tracepoint_multi_event(list, idx, sys_name, evt_name) : 493 add_tracepoint_multi_event(list, idx, sys_name, evt_name,
440 add_tracepoint(list, idx, sys_name, evt_name); 494 err, head_config) :
495 add_tracepoint(list, idx, sys_name, evt_name,
496 err, head_config);
441} 497}
442 498
443static int add_tracepoint_multi_sys(struct list_head *list, int *idx, 499static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
444 char *sys_name, char *evt_name) 500 char *sys_name, char *evt_name,
501 struct parse_events_error *err,
502 struct list_head *head_config)
445{ 503{
446 struct dirent *events_ent; 504 struct dirent *events_ent;
447 DIR *events_dir; 505 DIR *events_dir;
@@ -449,7 +507,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
449 507
450 events_dir = opendir(tracing_events_path); 508 events_dir = opendir(tracing_events_path);
451 if (!events_dir) { 509 if (!events_dir) {
452 perror("Can't open event dir"); 510 tracepoint_error(err, errno, sys_name, evt_name);
453 return -1; 511 return -1;
454 } 512 }
455 513
@@ -465,20 +523,135 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
465 continue; 523 continue;
466 524
467 ret = add_tracepoint_event(list, idx, events_ent->d_name, 525 ret = add_tracepoint_event(list, idx, events_ent->d_name,
468 evt_name); 526 evt_name, err, head_config);
469 } 527 }
470 528
471 closedir(events_dir); 529 closedir(events_dir);
472 return ret; 530 return ret;
473} 531}
474 532
475int parse_events_add_tracepoint(struct list_head *list, int *idx, 533struct __add_bpf_event_param {
476 char *sys, char *event) 534 struct parse_events_evlist *data;
535 struct list_head *list;
536};
537
538static int add_bpf_event(struct probe_trace_event *tev, int fd,
539 void *_param)
477{ 540{
478 if (strpbrk(sys, "*?")) 541 LIST_HEAD(new_evsels);
479 return add_tracepoint_multi_sys(list, idx, sys, event); 542 struct __add_bpf_event_param *param = _param;
480 else 543 struct parse_events_evlist *evlist = param->data;
481 return add_tracepoint_event(list, idx, sys, event); 544 struct list_head *list = param->list;
545 struct perf_evsel *pos;
546 int err;
547
548 pr_debug("add bpf event %s:%s and attach bpf program %d\n",
549 tev->group, tev->event, fd);
550
551 err = parse_events_add_tracepoint(&new_evsels, &evlist->idx, tev->group,
552 tev->event, evlist->error, NULL);
553 if (err) {
554 struct perf_evsel *evsel, *tmp;
555
556 pr_debug("Failed to add BPF event %s:%s\n",
557 tev->group, tev->event);
558 list_for_each_entry_safe(evsel, tmp, &new_evsels, node) {
559 list_del(&evsel->node);
560 perf_evsel__delete(evsel);
561 }
562 return err;
563 }
564 pr_debug("adding %s:%s\n", tev->group, tev->event);
565
566 list_for_each_entry(pos, &new_evsels, node) {
567 pr_debug("adding %s:%s to %p\n",
568 tev->group, tev->event, pos);
569 pos->bpf_fd = fd;
570 }
571 list_splice(&new_evsels, list);
572 return 0;
573}
574
575int parse_events_load_bpf_obj(struct parse_events_evlist *data,
576 struct list_head *list,
577 struct bpf_object *obj)
578{
579 int err;
580 char errbuf[BUFSIZ];
581 struct __add_bpf_event_param param = {data, list};
582 static bool registered_unprobe_atexit = false;
583
584 if (IS_ERR(obj) || !obj) {
585 snprintf(errbuf, sizeof(errbuf),
586 "Internal error: load bpf obj with NULL");
587 err = -EINVAL;
588 goto errout;
589 }
590
591 /*
592 * Register atexit handler before calling bpf__probe() so
593 * bpf__probe() don't need to unprobe probe points its already
594 * created when failure.
595 */
596 if (!registered_unprobe_atexit) {
597 atexit(bpf__clear);
598 registered_unprobe_atexit = true;
599 }
600
601 err = bpf__probe(obj);
602 if (err) {
603 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
604 goto errout;
605 }
606
607 err = bpf__load(obj);
608 if (err) {
609 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
610 goto errout;
611 }
612
613 err = bpf__foreach_tev(obj, add_bpf_event, &param);
614 if (err) {
615 snprintf(errbuf, sizeof(errbuf),
616 "Attach events in BPF object failed");
617 goto errout;
618 }
619
620 return 0;
621errout:
622 data->error->help = strdup("(add -v to see detail)");
623 data->error->str = strdup(errbuf);
624 return err;
625}
626
627int parse_events_load_bpf(struct parse_events_evlist *data,
628 struct list_head *list,
629 char *bpf_file_name,
630 bool source)
631{
632 struct bpf_object *obj;
633
634 obj = bpf__prepare_load(bpf_file_name, source);
635 if (IS_ERR(obj) || !obj) {
636 char errbuf[BUFSIZ];
637 int err;
638
639 err = obj ? PTR_ERR(obj) : -EINVAL;
640
641 if (err == -ENOTSUP)
642 snprintf(errbuf, sizeof(errbuf),
643 "BPF support is not compiled");
644 else
645 snprintf(errbuf, sizeof(errbuf),
646 "BPF object file '%s' is invalid",
647 bpf_file_name);
648
649 data->error->help = strdup("(add -v to see detail)");
650 data->error->str = strdup(errbuf);
651 return err;
652 }
653
654 return parse_events_load_bpf_obj(data, list, obj);
482} 655}
483 656
484static int 657static int
@@ -565,9 +738,13 @@ static int check_type_val(struct parse_events_term *term,
565 return -EINVAL; 738 return -EINVAL;
566} 739}
567 740
568static int config_term(struct perf_event_attr *attr, 741typedef int config_term_func_t(struct perf_event_attr *attr,
569 struct parse_events_term *term, 742 struct parse_events_term *term,
570 struct parse_events_error *err) 743 struct parse_events_error *err);
744
745static int config_term_common(struct perf_event_attr *attr,
746 struct parse_events_term *term,
747 struct parse_events_error *err)
571{ 748{
572#define CHECK_TYPE_VAL(type) \ 749#define CHECK_TYPE_VAL(type) \
573do { \ 750do { \
@@ -576,12 +753,6 @@ do { \
576} while (0) 753} while (0)
577 754
578 switch (term->type_term) { 755 switch (term->type_term) {
579 case PARSE_EVENTS__TERM_TYPE_USER:
580 /*
581 * Always succeed for sysfs terms, as we dont know
582 * at this point what type they need to have.
583 */
584 return 0;
585 case PARSE_EVENTS__TERM_TYPE_CONFIG: 756 case PARSE_EVENTS__TERM_TYPE_CONFIG:
586 CHECK_TYPE_VAL(NUM); 757 CHECK_TYPE_VAL(NUM);
587 attr->config = term->val.num; 758 attr->config = term->val.num;
@@ -620,10 +791,19 @@ do { \
620 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 791 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
621 CHECK_TYPE_VAL(NUM); 792 CHECK_TYPE_VAL(NUM);
622 break; 793 break;
794 case PARSE_EVENTS__TERM_TYPE_INHERIT:
795 CHECK_TYPE_VAL(NUM);
796 break;
797 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
798 CHECK_TYPE_VAL(NUM);
799 break;
623 case PARSE_EVENTS__TERM_TYPE_NAME: 800 case PARSE_EVENTS__TERM_TYPE_NAME:
624 CHECK_TYPE_VAL(STR); 801 CHECK_TYPE_VAL(STR);
625 break; 802 break;
626 default: 803 default:
804 err->str = strdup("unknown term");
805 err->idx = term->err_term;
806 err->help = parse_events_formats_error_string(NULL);
627 return -EINVAL; 807 return -EINVAL;
628 } 808 }
629 809
@@ -631,9 +811,46 @@ do { \
631#undef CHECK_TYPE_VAL 811#undef CHECK_TYPE_VAL
632} 812}
633 813
814static int config_term_pmu(struct perf_event_attr *attr,
815 struct parse_events_term *term,
816 struct parse_events_error *err)
817{
818 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER)
819 /*
820 * Always succeed for sysfs terms, as we dont know
821 * at this point what type they need to have.
822 */
823 return 0;
824 else
825 return config_term_common(attr, term, err);
826}
827
828static int config_term_tracepoint(struct perf_event_attr *attr,
829 struct parse_events_term *term,
830 struct parse_events_error *err)
831{
832 switch (term->type_term) {
833 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
834 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
835 case PARSE_EVENTS__TERM_TYPE_INHERIT:
836 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
837 return config_term_common(attr, term, err);
838 default:
839 if (err) {
840 err->idx = term->err_term;
841 err->str = strdup("unknown term");
842 err->help = strdup("valid terms: call-graph,stack-size\n");
843 }
844 return -EINVAL;
845 }
846
847 return 0;
848}
849
634static int config_attr(struct perf_event_attr *attr, 850static int config_attr(struct perf_event_attr *attr,
635 struct list_head *head, 851 struct list_head *head,
636 struct parse_events_error *err) 852 struct parse_events_error *err,
853 config_term_func_t config_term)
637{ 854{
638 struct parse_events_term *term; 855 struct parse_events_term *term;
639 856
@@ -680,6 +897,12 @@ do { \
680 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 897 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
681 ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num); 898 ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num);
682 break; 899 break;
900 case PARSE_EVENTS__TERM_TYPE_INHERIT:
901 ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 1 : 0);
902 break;
903 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
904 ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1);
905 break;
683 default: 906 default:
684 break; 907 break;
685 } 908 }
@@ -688,6 +911,27 @@ do { \
688 return 0; 911 return 0;
689} 912}
690 913
914int parse_events_add_tracepoint(struct list_head *list, int *idx,
915 char *sys, char *event,
916 struct parse_events_error *err,
917 struct list_head *head_config)
918{
919 if (head_config) {
920 struct perf_event_attr attr;
921
922 if (config_attr(&attr, head_config, err,
923 config_term_tracepoint))
924 return -EINVAL;
925 }
926
927 if (strpbrk(sys, "*?"))
928 return add_tracepoint_multi_sys(list, idx, sys, event,
929 err, head_config);
930 else
931 return add_tracepoint_event(list, idx, sys, event,
932 err, head_config);
933}
934
691int parse_events_add_numeric(struct parse_events_evlist *data, 935int parse_events_add_numeric(struct parse_events_evlist *data,
692 struct list_head *list, 936 struct list_head *list,
693 u32 type, u64 config, 937 u32 type, u64 config,
@@ -701,7 +945,8 @@ int parse_events_add_numeric(struct parse_events_evlist *data,
701 attr.config = config; 945 attr.config = config;
702 946
703 if (head_config) { 947 if (head_config) {
704 if (config_attr(&attr, head_config, data->error)) 948 if (config_attr(&attr, head_config, data->error,
949 config_term_common))
705 return -EINVAL; 950 return -EINVAL;
706 951
707 if (get_config_terms(head_config, &config_terms)) 952 if (get_config_terms(head_config, &config_terms))
@@ -761,7 +1006,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
761 * Configure hardcoded terms first, no need to check 1006 * Configure hardcoded terms first, no need to check
762 * return value when called with fail == 0 ;) 1007 * return value when called with fail == 0 ;)
763 */ 1008 */
764 if (config_attr(&attr, head_config, data->error)) 1009 if (config_attr(&attr, head_config, data->error, config_term_pmu))
765 return -EINVAL; 1010 return -EINVAL;
766 1011
767 if (get_config_terms(head_config, &config_terms)) 1012 if (get_config_terms(head_config, &config_terms))
@@ -793,6 +1038,11 @@ void parse_events__set_leader(char *name, struct list_head *list)
793{ 1038{
794 struct perf_evsel *leader; 1039 struct perf_evsel *leader;
795 1040
1041 if (list_empty(list)) {
1042 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1043 return;
1044 }
1045
796 __perf_evlist__set_leader(list); 1046 __perf_evlist__set_leader(list);
797 leader = list_entry(list->next, struct perf_evsel, node); 1047 leader = list_entry(list->next, struct perf_evsel, node);
798 leader->group_name = name ? strdup(name) : NULL; 1048 leader->group_name = name ? strdup(name) : NULL;
@@ -819,6 +1069,7 @@ struct event_modifier {
819 int eG; 1069 int eG;
820 int eI; 1070 int eI;
821 int precise; 1071 int precise;
1072 int precise_max;
822 int exclude_GH; 1073 int exclude_GH;
823 int sample_read; 1074 int sample_read;
824 int pinned; 1075 int pinned;
@@ -834,6 +1085,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
834 int eG = evsel ? evsel->attr.exclude_guest : 0; 1085 int eG = evsel ? evsel->attr.exclude_guest : 0;
835 int eI = evsel ? evsel->attr.exclude_idle : 0; 1086 int eI = evsel ? evsel->attr.exclude_idle : 0;
836 int precise = evsel ? evsel->attr.precise_ip : 0; 1087 int precise = evsel ? evsel->attr.precise_ip : 0;
1088 int precise_max = 0;
837 int sample_read = 0; 1089 int sample_read = 0;
838 int pinned = evsel ? evsel->attr.pinned : 0; 1090 int pinned = evsel ? evsel->attr.pinned : 0;
839 1091
@@ -870,6 +1122,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
870 /* use of precise requires exclude_guest */ 1122 /* use of precise requires exclude_guest */
871 if (!exclude_GH) 1123 if (!exclude_GH)
872 eG = 1; 1124 eG = 1;
1125 } else if (*str == 'P') {
1126 precise_max = 1;
873 } else if (*str == 'S') { 1127 } else if (*str == 'S') {
874 sample_read = 1; 1128 sample_read = 1;
875 } else if (*str == 'D') { 1129 } else if (*str == 'D') {
@@ -900,6 +1154,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
900 mod->eG = eG; 1154 mod->eG = eG;
901 mod->eI = eI; 1155 mod->eI = eI;
902 mod->precise = precise; 1156 mod->precise = precise;
1157 mod->precise_max = precise_max;
903 mod->exclude_GH = exclude_GH; 1158 mod->exclude_GH = exclude_GH;
904 mod->sample_read = sample_read; 1159 mod->sample_read = sample_read;
905 mod->pinned = pinned; 1160 mod->pinned = pinned;
@@ -916,7 +1171,7 @@ static int check_modifier(char *str)
916 char *p = str; 1171 char *p = str;
917 1172
918 /* The sizeof includes 0 byte as well. */ 1173 /* The sizeof includes 0 byte as well. */
919 if (strlen(str) > (sizeof("ukhGHpppSDI") - 1)) 1174 if (strlen(str) > (sizeof("ukhGHpppPSDI") - 1))
920 return -1; 1175 return -1;
921 1176
922 while (*p) { 1177 while (*p) {
@@ -955,6 +1210,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
955 evsel->attr.exclude_idle = mod.eI; 1210 evsel->attr.exclude_idle = mod.eI;
956 evsel->exclude_GH = mod.exclude_GH; 1211 evsel->exclude_GH = mod.exclude_GH;
957 evsel->sample_read = mod.sample_read; 1212 evsel->sample_read = mod.sample_read;
1213 evsel->precise_max = mod.precise_max;
958 1214
959 if (perf_evsel__is_group_leader(evsel)) 1215 if (perf_evsel__is_group_leader(evsel))
960 evsel->attr.pinned = mod.pinned; 1216 evsel->attr.pinned = mod.pinned;
@@ -1142,6 +1398,11 @@ int parse_events(struct perf_evlist *evlist, const char *str,
1142 if (!ret) { 1398 if (!ret) {
1143 struct perf_evsel *last; 1399 struct perf_evsel *last;
1144 1400
1401 if (list_empty(&data.list)) {
1402 WARN_ONCE(true, "WARNING: event parser found nothing");
1403 return -1;
1404 }
1405
1145 perf_evlist__splice_list_tail(evlist, &data.list); 1406 perf_evlist__splice_list_tail(evlist, &data.list);
1146 evlist->nr_groups += data.nr_groups; 1407 evlist->nr_groups += data.nr_groups;
1147 last = perf_evlist__last(evlist); 1408 last = perf_evlist__last(evlist);
@@ -1251,6 +1512,12 @@ foreach_evsel_in_last_glob(struct perf_evlist *evlist,
1251 struct perf_evsel *last = NULL; 1512 struct perf_evsel *last = NULL;
1252 int err; 1513 int err;
1253 1514
1515 /*
1516 * Don't return when list_empty, give func a chance to report
1517 * error when it found last == NULL.
1518 *
1519 * So no need to WARN here, let *func do this.
1520 */
1254 if (evlist->nr_entries > 0) 1521 if (evlist->nr_entries > 0)
1255 last = perf_evlist__last(evlist); 1522 last = perf_evlist__last(evlist);
1256 1523
@@ -1419,7 +1686,7 @@ restart:
1419 printf(" %-50s [%s]\n", evt_list[evt_i++], 1686 printf(" %-50s [%s]\n", evt_list[evt_i++],
1420 event_type_descriptors[PERF_TYPE_TRACEPOINT]); 1687 event_type_descriptors[PERF_TYPE_TRACEPOINT]);
1421 } 1688 }
1422 if (evt_num) 1689 if (evt_num && pager_in_use())
1423 printf("\n"); 1690 printf("\n");
1424 1691
1425out_free: 1692out_free:
@@ -1575,7 +1842,7 @@ restart:
1575 printf(" %-50s [%s]\n", evt_list[evt_i++], 1842 printf(" %-50s [%s]\n", evt_list[evt_i++],
1576 event_type_descriptors[PERF_TYPE_HW_CACHE]); 1843 event_type_descriptors[PERF_TYPE_HW_CACHE]);
1577 } 1844 }
1578 if (evt_num) 1845 if (evt_num && pager_in_use())
1579 printf("\n"); 1846 printf("\n");
1580 1847
1581out_free: 1848out_free:
@@ -1648,7 +1915,7 @@ restart:
1648 } 1915 }
1649 printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]); 1916 printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
1650 } 1917 }
1651 if (evt_num) 1918 if (evt_num && pager_in_use())
1652 printf("\n"); 1919 printf("\n");
1653 1920
1654out_free: 1921out_free:
@@ -1689,13 +1956,14 @@ void print_events(const char *event_glob, bool name_only)
1689 printf(" %-50s [%s]\n", 1956 printf(" %-50s [%s]\n",
1690 "cpu/t1=v1[,t2=v2,t3 ...]/modifier", 1957 "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
1691 event_type_descriptors[PERF_TYPE_RAW]); 1958 event_type_descriptors[PERF_TYPE_RAW]);
1692 printf(" (see 'man perf-list' on how to encode it)\n"); 1959 if (pager_in_use())
1693 printf("\n"); 1960 printf(" (see 'man perf-list' on how to encode it)\n\n");
1694 1961
1695 printf(" %-50s [%s]\n", 1962 printf(" %-50s [%s]\n",
1696 "mem:<addr>[/len][:access]", 1963 "mem:<addr>[/len][:access]",
1697 event_type_descriptors[PERF_TYPE_BREAKPOINT]); 1964 event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1698 printf("\n"); 1965 if (pager_in_use())
1966 printf("\n");
1699 } 1967 }
1700 1968
1701 print_tracepoint_events(NULL, NULL, name_only); 1969 print_tracepoint_events(NULL, NULL, name_only);
@@ -1811,3 +2079,29 @@ void parse_events_evlist_error(struct parse_events_evlist *data,
1811 err->str = strdup(str); 2079 err->str = strdup(str);
1812 WARN_ONCE(!err->str, "WARNING: failed to allocate error string"); 2080 WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
1813} 2081}
2082
2083/*
2084 * Return string contains valid config terms of an event.
2085 * @additional_terms: For terms such as PMU sysfs terms.
2086 */
2087char *parse_events_formats_error_string(char *additional_terms)
2088{
2089 char *str;
2090 static const char *static_terms = "config,config1,config2,name,"
2091 "period,freq,branch_type,time,"
2092 "call-graph,stack-size\n";
2093
2094 /* valid terms */
2095 if (additional_terms) {
2096 if (!asprintf(&str, "valid terms: %s,%s",
2097 additional_terms, static_terms))
2098 goto fail;
2099 } else {
2100 if (!asprintf(&str, "valid terms: %s", static_terms))
2101 goto fail;
2102 }
2103 return str;
2104
2105fail:
2106 return NULL;
2107}
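
Among the parse-events.c changes above is a new event modifier, 'P' (precise_max), which asks the tool to pick the highest precise_ip level supported, alongside the existing 'p' that raises the level one step per occurrence. The standalone toy scanner below shows how such single-character modifiers are typically accumulated; the field names are simplified stand-ins, not the perf event_modifier fields.

#include <stdio.h>
#include <string.h>

struct mod {
	int user, kernel, hyp;
	int precise;		/* each 'p' bumps the level */
	int precise_max;	/* 'P': let the tool pick the highest level */
};

static int parse_mod(const char *str, struct mod *m)
{
	memset(m, 0, sizeof(*m));

	for (; *str; str++) {
		switch (*str) {
		case 'u': m->user = 1; break;
		case 'k': m->kernel = 1; break;
		case 'h': m->hyp = 1; break;
		case 'p': m->precise++; break;
		case 'P': m->precise_max = 1; break;
		default:
			return -1;	/* unknown modifier character */
		}
	}
	return 0;
}

int main(void)
{
	struct mod m;

	if (!parse_mod("ukpP", &m))
		printf("user=%d kernel=%d precise=%d precise_max=%d\n",
		       m.user, m.kernel, m.precise, m.precise_max);
	return 0;
}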
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index a09b0e210997..f1a6db107241 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -67,6 +67,8 @@ enum {
67 PARSE_EVENTS__TERM_TYPE_TIME, 67 PARSE_EVENTS__TERM_TYPE_TIME,
68 PARSE_EVENTS__TERM_TYPE_CALLGRAPH, 68 PARSE_EVENTS__TERM_TYPE_CALLGRAPH,
69 PARSE_EVENTS__TERM_TYPE_STACKSIZE, 69 PARSE_EVENTS__TERM_TYPE_STACKSIZE,
70 PARSE_EVENTS__TERM_TYPE_NOINHERIT,
71 PARSE_EVENTS__TERM_TYPE_INHERIT
70}; 72};
71 73
72struct parse_events_term { 74struct parse_events_term {
@@ -118,7 +120,18 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add);
118int parse_events__modifier_group(struct list_head *list, char *event_mod); 120int parse_events__modifier_group(struct list_head *list, char *event_mod);
119int parse_events_name(struct list_head *list, char *name); 121int parse_events_name(struct list_head *list, char *name);
120int parse_events_add_tracepoint(struct list_head *list, int *idx, 122int parse_events_add_tracepoint(struct list_head *list, int *idx,
121 char *sys, char *event); 123 char *sys, char *event,
124 struct parse_events_error *error,
125 struct list_head *head_config);
126int parse_events_load_bpf(struct parse_events_evlist *data,
127 struct list_head *list,
128 char *bpf_file_name,
129 bool source);
130/* Provide this function for perf test */
131struct bpf_object;
132int parse_events_load_bpf_obj(struct parse_events_evlist *data,
133 struct list_head *list,
134 struct bpf_object *obj);
122int parse_events_add_numeric(struct parse_events_evlist *data, 135int parse_events_add_numeric(struct parse_events_evlist *data,
123 struct list_head *list, 136 struct list_head *list,
124 u32 type, u64 config, 137 u32 type, u64 config,
@@ -155,5 +168,6 @@ int print_hwcache_events(const char *event_glob, bool name_only);
155extern int is_valid_tracepoint(const char *event_string); 168extern int is_valid_tracepoint(const char *event_string);
156 169
157int valid_event_mount(const char *eventfs); 170int valid_event_mount(const char *eventfs);
171char *parse_events_formats_error_string(char *additional_terms);
158 172
159#endif /* __PERF_PARSE_EVENTS_H */ 173#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 936d566f48d8..58c5831ffd5c 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -115,6 +115,8 @@ do { \
115group [^,{}/]*[{][^}]*[}][^,{}/]* 115group [^,{}/]*[{][^}]*[}][^,{}/]*
116event_pmu [^,{}/]+[/][^/]*[/][^,{}/]* 116event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
117event [^,{}/]+ 117event [^,{}/]+
118bpf_object .*\.(o|bpf)
119bpf_source .*\.c
118 120
119num_dec [0-9]+ 121num_dec [0-9]+
120num_hex 0x[a-fA-F0-9]+ 122num_hex 0x[a-fA-F0-9]+
@@ -122,7 +124,7 @@ num_raw_hex [a-fA-F0-9]+
122name [a-zA-Z_*?][a-zA-Z0-9_*?.]* 124name [a-zA-Z_*?][a-zA-Z0-9_*?.]*
123name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]* 125name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]*
124/* If you add a modifier you need to update check_modifier() */ 126/* If you add a modifier you need to update check_modifier() */
125modifier_event [ukhpGHSDI]+ 127modifier_event [ukhpPGHSDI]+
126modifier_bp [rwx]{1,3} 128modifier_bp [rwx]{1,3}
127 129
128%% 130%%
@@ -159,6 +161,8 @@ modifier_bp [rwx]{1,3}
159 } 161 }
160 162
161{event_pmu} | 163{event_pmu} |
164{bpf_object} |
165{bpf_source} |
162{event} { 166{event} {
163 BEGIN(INITIAL); 167 BEGIN(INITIAL);
164 REWIND(1); 168 REWIND(1);
@@ -174,7 +178,7 @@ modifier_bp [rwx]{1,3}
174 178
175<config>{ 179<config>{
176 /* 180 /*
177 * Please update formats_error_string any time 181 * Please update parse_events_formats_error_string any time
178 * new static term is added. 182 * new static term is added.
179 */ 183 */
180config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } 184config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
@@ -187,6 +191,8 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
187time { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); } 191time { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); }
188call-graph { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); } 192call-graph { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); }
189stack-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); } 193stack-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); }
194inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); }
195no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
190, { return ','; } 196, { return ','; }
191"/" { BEGIN(INITIAL); return '/'; } 197"/" { BEGIN(INITIAL); return '/'; }
192{name_minus} { return str(yyscanner, PE_NAME); } 198{name_minus} { return str(yyscanner, PE_NAME); }
@@ -264,6 +270,8 @@ r{num_raw_hex} { return raw(yyscanner); }
264{num_hex} { return value(yyscanner, 16); } 270{num_hex} { return value(yyscanner, 16); }
265 271
266{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } 272{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
273{bpf_object} { return str(yyscanner, PE_BPF_OBJECT); }
274{bpf_source} { return str(yyscanner, PE_BPF_SOURCE); }
267{name} { return pmu_str_check(yyscanner); } 275{name} { return pmu_str_check(yyscanner); }
268"/" { BEGIN(config); return '/'; } 276"/" { BEGIN(config); return '/'; }
269- { return '-'; } 277- { return '-'; }
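
The new lexer rules above route event strings by file suffix: anything ending in .o or .bpf is tokenized as a BPF object and anything ending in .c as BPF source, before falling back to the ordinary event-name rules. Below is a small standalone classifier in the same spirit, for illustration only; the real classification is done by the flex patterns shown above.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool has_suffix(const char *s, const char *suf)
{
	size_t ls = strlen(s), lf = strlen(suf);

	return ls >= lf && !strcmp(s + ls - lf, suf);
}

static const char *classify_event_token(const char *tok)
{
	if (has_suffix(tok, ".c"))
		return "PE_BPF_SOURCE";
	if (has_suffix(tok, ".o") || has_suffix(tok, ".bpf"))
		return "PE_BPF_OBJECT";
	return "ordinary event name";
}

int main(void)
{
	const char *tests[] = { "cycles", "probe.o", "filter.bpf", "prog.c" };
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%-12s -> %s\n", tests[i], classify_event_token(tests[i]));
	return 0;
}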
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 9cd70819c795..ad379968d4c1 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -42,6 +42,7 @@ static inc_group_count(struct list_head *list,
42%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM 42%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
43%token PE_EVENT_NAME 43%token PE_EVENT_NAME
44%token PE_NAME 44%token PE_NAME
45%token PE_BPF_OBJECT PE_BPF_SOURCE
45%token PE_MODIFIER_EVENT PE_MODIFIER_BP 46%token PE_MODIFIER_EVENT PE_MODIFIER_BP
46%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT 47%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
47%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP 48%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
@@ -53,6 +54,8 @@ static inc_group_count(struct list_head *list,
53%type <num> PE_RAW 54%type <num> PE_RAW
54%type <num> PE_TERM 55%type <num> PE_TERM
55%type <str> PE_NAME 56%type <str> PE_NAME
57%type <str> PE_BPF_OBJECT
58%type <str> PE_BPF_SOURCE
56%type <str> PE_NAME_CACHE_TYPE 59%type <str> PE_NAME_CACHE_TYPE
57%type <str> PE_NAME_CACHE_OP_RESULT 60%type <str> PE_NAME_CACHE_OP_RESULT
58%type <str> PE_MODIFIER_EVENT 61%type <str> PE_MODIFIER_EVENT
@@ -67,8 +70,10 @@ static inc_group_count(struct list_head *list,
67%type <head> event_legacy_cache 70%type <head> event_legacy_cache
68%type <head> event_legacy_mem 71%type <head> event_legacy_mem
69%type <head> event_legacy_tracepoint 72%type <head> event_legacy_tracepoint
73%type <tracepoint_name> tracepoint_name
70%type <head> event_legacy_numeric 74%type <head> event_legacy_numeric
71%type <head> event_legacy_raw 75%type <head> event_legacy_raw
76%type <head> event_bpf_file
72%type <head> event_def 77%type <head> event_def
73%type <head> event_mod 78%type <head> event_mod
74%type <head> event_name 79%type <head> event_name
@@ -84,6 +89,10 @@ static inc_group_count(struct list_head *list,
84 u64 num; 89 u64 num;
85 struct list_head *head; 90 struct list_head *head;
86 struct parse_events_term *term; 91 struct parse_events_term *term;
92 struct tracepoint_name {
93 char *sys;
94 char *event;
95 } tracepoint_name;
87} 96}
88%% 97%%
89 98
@@ -198,7 +207,8 @@ event_def: event_pmu |
198 event_legacy_mem | 207 event_legacy_mem |
199 event_legacy_tracepoint sep_dc | 208 event_legacy_tracepoint sep_dc |
200 event_legacy_numeric sep_dc | 209 event_legacy_numeric sep_dc |
201 event_legacy_raw sep_dc 210 event_legacy_raw sep_dc |
211 event_bpf_file
202 212
203event_pmu: 213event_pmu:
204PE_NAME '/' event_config '/' 214PE_NAME '/' event_config '/'
@@ -368,36 +378,60 @@ PE_PREFIX_MEM PE_VALUE sep_dc
368} 378}
369 379
370event_legacy_tracepoint: 380event_legacy_tracepoint:
371PE_NAME '-' PE_NAME ':' PE_NAME 381tracepoint_name
372{ 382{
373 struct parse_events_evlist *data = _data; 383 struct parse_events_evlist *data = _data;
384 struct parse_events_error *error = data->error;
374 struct list_head *list; 385 struct list_head *list;
375 char sys_name[128];
376 snprintf(&sys_name, 128, "%s-%s", $1, $3);
377 386
378 ALLOC_LIST(list); 387 ALLOC_LIST(list);
379 ABORT_ON(parse_events_add_tracepoint(list, &data->idx, &sys_name, $5)); 388 if (error)
389 error->idx = @1.first_column;
390
391 if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event,
392 error, NULL))
393 return -1;
394
380 $$ = list; 395 $$ = list;
381} 396}
382| 397|
383PE_NAME ':' PE_NAME 398tracepoint_name '/' event_config '/'
384{ 399{
385 struct parse_events_evlist *data = _data; 400 struct parse_events_evlist *data = _data;
401 struct parse_events_error *error = data->error;
386 struct list_head *list; 402 struct list_head *list;
387 403
388 ALLOC_LIST(list); 404 ALLOC_LIST(list);
389 if (parse_events_add_tracepoint(list, &data->idx, $1, $3)) { 405 if (error)
390 struct parse_events_error *error = data->error; 406 error->idx = @1.first_column;
391 407
392 if (error) { 408 if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event,
393 error->idx = @1.first_column; 409 error, $3))
394 error->str = strdup("unknown tracepoint");
395 }
396 return -1; 410 return -1;
397 } 411
398 $$ = list; 412 $$ = list;
399} 413}
400 414
415tracepoint_name:
416PE_NAME '-' PE_NAME ':' PE_NAME
417{
418 char sys_name[128];
419 struct tracepoint_name tracepoint;
420
421 snprintf(&sys_name, 128, "%s-%s", $1, $3);
422 tracepoint.sys = &sys_name;
423 tracepoint.event = $5;
424
425 $$ = tracepoint;
426}
427|
428PE_NAME ':' PE_NAME
429{
430 struct tracepoint_name tracepoint = {$1, $3};
431
432 $$ = tracepoint;
433}
434
401event_legacy_numeric: 435event_legacy_numeric:
402PE_VALUE ':' PE_VALUE 436PE_VALUE ':' PE_VALUE
403{ 437{
@@ -420,6 +454,28 @@ PE_RAW
420 $$ = list; 454 $$ = list;
421} 455}
422 456
457event_bpf_file:
458PE_BPF_OBJECT
459{
460 struct parse_events_evlist *data = _data;
461 struct parse_events_error *error = data->error;
462 struct list_head *list;
463
464 ALLOC_LIST(list);
465 ABORT_ON(parse_events_load_bpf(data, list, $1, false));
466 $$ = list;
467}
468|
469PE_BPF_SOURCE
470{
471 struct parse_events_evlist *data = _data;
472 struct list_head *list;
473
474 ALLOC_LIST(list);
475 ABORT_ON(parse_events_load_bpf(data, list, $1, true));
476 $$ = list;
477}
478
423start_terms: event_config 479start_terms: event_config
424{ 480{
425 struct parse_events_terms *data = _data; 481 struct parse_events_terms *data = _data;
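
The grammar now models a tracepoint system/event pair as its own tracepoint_name nonterminal and accepts BPF object or source files through event_bpf_file. A hedged sketch of composing the "subsys-rest:event" pair outside the yacc action, using heap storage so the strings outlive the rule that produced them; make_tracepoint_name and its argument names are illustrative, not part of the patch:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tracepoint_name_sketch {
	char *sys;
	char *event;
};

/* Build the pair parse_events_add_tracepoint() would receive from the
 * PE_NAME '-' PE_NAME ':' PE_NAME form. */
static int make_tracepoint_name(struct tracepoint_name_sketch *tn,
				const char *sys_head, const char *sys_tail,
				const char *event)
{
	if (asprintf(&tn->sys, "%s-%s", sys_head, sys_tail) < 0)
		return -1;
	tn->event = strdup(event);
	if (!tn->event) {
		free(tn->sys);
		return -1;
	}
	return 0;
}
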
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 01626be2a8eb..9fca09296eb3 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -2,10 +2,13 @@
2#include "parse-options.h" 2#include "parse-options.h"
3#include "cache.h" 3#include "cache.h"
4#include "header.h" 4#include "header.h"
5#include <linux/string.h>
5 6
6#define OPT_SHORT 1 7#define OPT_SHORT 1
7#define OPT_UNSET 2 8#define OPT_UNSET 2
8 9
10static struct strbuf error_buf = STRBUF_INIT;
11
9static int opterror(const struct option *opt, const char *reason, int flags) 12static int opterror(const struct option *opt, const char *reason, int flags)
10{ 13{
11 if (flags & OPT_SHORT) 14 if (flags & OPT_SHORT)
@@ -372,7 +375,8 @@ void parse_options_start(struct parse_opt_ctx_t *ctx,
372} 375}
373 376
374static int usage_with_options_internal(const char * const *, 377static int usage_with_options_internal(const char * const *,
375 const struct option *, int); 378 const struct option *, int,
379 struct parse_opt_ctx_t *);
376 380
377int parse_options_step(struct parse_opt_ctx_t *ctx, 381int parse_options_step(struct parse_opt_ctx_t *ctx,
378 const struct option *options, 382 const struct option *options,
@@ -396,8 +400,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
396 400
397 if (arg[1] != '-') { 401 if (arg[1] != '-') {
398 ctx->opt = ++arg; 402 ctx->opt = ++arg;
399 if (internal_help && *ctx->opt == 'h') 403 if (internal_help && *ctx->opt == 'h') {
400 return usage_with_options_internal(usagestr, options, 0); 404 return usage_with_options_internal(usagestr, options, 0, ctx);
405 }
401 switch (parse_short_opt(ctx, options)) { 406 switch (parse_short_opt(ctx, options)) {
402 case -1: 407 case -1:
403 return parse_options_usage(usagestr, options, arg, 1); 408 return parse_options_usage(usagestr, options, arg, 1);
@@ -412,7 +417,7 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
412 check_typos(arg, options); 417 check_typos(arg, options);
413 while (ctx->opt) { 418 while (ctx->opt) {
414 if (internal_help && *ctx->opt == 'h') 419 if (internal_help && *ctx->opt == 'h')
415 return usage_with_options_internal(usagestr, options, 0); 420 return usage_with_options_internal(usagestr, options, 0, ctx);
416 arg = ctx->opt; 421 arg = ctx->opt;
417 switch (parse_short_opt(ctx, options)) { 422 switch (parse_short_opt(ctx, options)) {
418 case -1: 423 case -1:
@@ -445,9 +450,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
445 450
446 arg += 2; 451 arg += 2;
447 if (internal_help && !strcmp(arg, "help-all")) 452 if (internal_help && !strcmp(arg, "help-all"))
448 return usage_with_options_internal(usagestr, options, 1); 453 return usage_with_options_internal(usagestr, options, 1, ctx);
449 if (internal_help && !strcmp(arg, "help")) 454 if (internal_help && !strcmp(arg, "help"))
450 return usage_with_options_internal(usagestr, options, 0); 455 return usage_with_options_internal(usagestr, options, 0, ctx);
451 if (!strcmp(arg, "list-opts")) 456 if (!strcmp(arg, "list-opts"))
452 return PARSE_OPT_LIST_OPTS; 457 return PARSE_OPT_LIST_OPTS;
453 if (!strcmp(arg, "list-cmds")) 458 if (!strcmp(arg, "list-cmds"))
@@ -496,7 +501,7 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
496{ 501{
497 struct parse_opt_ctx_t ctx; 502 struct parse_opt_ctx_t ctx;
498 503
499 perf_header__set_cmdline(argc, argv); 504 perf_env__set_cmdline(&perf_env, argc, argv);
500 505
501 /* build usage string if it's not provided */ 506 /* build usage string if it's not provided */
502 if (subcommands && !usagestr[0]) { 507 if (subcommands && !usagestr[0]) {
@@ -537,9 +542,11 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
537 exit(130); 542 exit(130);
538 default: /* PARSE_OPT_UNKNOWN */ 543 default: /* PARSE_OPT_UNKNOWN */
539 if (ctx.argv[0][1] == '-') { 544 if (ctx.argv[0][1] == '-') {
540 error("unknown option `%s'", ctx.argv[0] + 2); 545 strbuf_addf(&error_buf, "unknown option `%s'",
546 ctx.argv[0] + 2);
541 } else { 547 } else {
542 error("unknown switch `%c'", *ctx.opt); 548 strbuf_addf(&error_buf, "unknown switch `%c'",
549 *ctx.opt);
543 } 550 }
544 usage_with_options(usagestr, options); 551 usage_with_options(usagestr, options);
545 } 552 }
@@ -642,13 +649,93 @@ static void print_option_help(const struct option *opts, int full)
642 fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); 649 fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
643} 650}
644 651
652static int option__cmp(const void *va, const void *vb)
653{
654 const struct option *a = va, *b = vb;
655 int sa = tolower(a->short_name), sb = tolower(b->short_name), ret;
656
657 if (sa == 0)
658 sa = 'z' + 1;
659 if (sb == 0)
660 sb = 'z' + 1;
661
662 ret = sa - sb;
663
664 if (ret == 0) {
665 const char *la = a->long_name ?: "",
666 *lb = b->long_name ?: "";
667 ret = strcmp(la, lb);
668 }
669
670 return ret;
671}
672
673static struct option *options__order(const struct option *opts)
674{
675 int nr_opts = 0;
676 const struct option *o = opts;
677 struct option *ordered;
678
679 for (o = opts; o->type != OPTION_END; o++)
680 ++nr_opts;
681
682 ordered = memdup(opts, sizeof(*o) * (nr_opts + 1));
683 if (ordered == NULL)
684 goto out;
685
686 qsort(ordered, nr_opts, sizeof(*o), option__cmp);
687out:
688 return ordered;
689}
690
691static bool option__in_argv(const struct option *opt, const struct parse_opt_ctx_t *ctx)
692{
693 int i;
694
695 for (i = 1; i < ctx->argc; ++i) {
696 const char *arg = ctx->argv[i];
697
698 if (arg[0] != '-') {
699 if (arg[1] == '\0') {
700 if (arg[0] == opt->short_name)
701 return true;
702 continue;
703 }
704
705 if (opt->long_name && strcmp(opt->long_name, arg) == 0)
706 return true;
707
708 if (opt->help && strcasestr(opt->help, arg) != NULL)
709 return true;
710
711 continue;
712 }
713
714 if (arg[1] == opt->short_name ||
715 (arg[1] == '-' && opt->long_name && strcmp(opt->long_name, arg + 2) == 0))
716 return true;
717 }
718
719 return false;
720}
721
645int usage_with_options_internal(const char * const *usagestr, 722int usage_with_options_internal(const char * const *usagestr,
646 const struct option *opts, int full) 723 const struct option *opts, int full,
724 struct parse_opt_ctx_t *ctx)
647{ 725{
726 struct option *ordered;
727
648 if (!usagestr) 728 if (!usagestr)
649 return PARSE_OPT_HELP; 729 return PARSE_OPT_HELP;
650 730
651 fprintf(stderr, "\n usage: %s\n", *usagestr++); 731 setup_pager();
732
733 if (strbuf_avail(&error_buf)) {
734 fprintf(stderr, " Error: %s\n", error_buf.buf);
735 strbuf_release(&error_buf);
736 }
737
738 fprintf(stderr, "\n Usage: %s\n", *usagestr++);
652 while (*usagestr && **usagestr) 739 while (*usagestr && **usagestr)
653 fprintf(stderr, " or: %s\n", *usagestr++); 740 fprintf(stderr, " or: %s\n", *usagestr++);
654 while (*usagestr) { 741 while (*usagestr) {
@@ -661,11 +748,20 @@ int usage_with_options_internal(const char * const *usagestr,
661 if (opts->type != OPTION_GROUP) 748 if (opts->type != OPTION_GROUP)
662 fputc('\n', stderr); 749 fputc('\n', stderr);
663 750
664 for ( ; opts->type != OPTION_END; opts++) 751 ordered = options__order(opts);
752 if (ordered)
753 opts = ordered;
754
755 for ( ; opts->type != OPTION_END; opts++) {
756 if (ctx && ctx->argc > 1 && !option__in_argv(opts, ctx))
757 continue;
665 print_option_help(opts, full); 758 print_option_help(opts, full);
759 }
666 760
667 fputc('\n', stderr); 761 fputc('\n', stderr);
668 762
763 free(ordered);
764
669 return PARSE_OPT_HELP; 765 return PARSE_OPT_HELP;
670} 766}
671 767
@@ -673,7 +769,22 @@ void usage_with_options(const char * const *usagestr,
673 const struct option *opts) 769 const struct option *opts)
674{ 770{
675 exit_browser(false); 771 exit_browser(false);
676 usage_with_options_internal(usagestr, opts, 0); 772 usage_with_options_internal(usagestr, opts, 0, NULL);
773 exit(129);
774}
775
776void usage_with_options_msg(const char * const *usagestr,
777 const struct option *opts, const char *fmt, ...)
778{
779 va_list ap;
780
781 exit_browser(false);
782
783 va_start(ap, fmt);
784 strbuf_addv(&error_buf, fmt, ap);
785 va_end(ap);
786
787 usage_with_options_internal(usagestr, opts, 0, NULL);
677 exit(129); 788 exit(129);
678} 789}
679 790
@@ -684,7 +795,7 @@ int parse_options_usage(const char * const *usagestr,
684 if (!usagestr) 795 if (!usagestr)
685 goto opt; 796 goto opt;
686 797
687 fprintf(stderr, "\n usage: %s\n", *usagestr++); 798 fprintf(stderr, "\n Usage: %s\n", *usagestr++);
688 while (*usagestr && **usagestr) 799 while (*usagestr && **usagestr)
689 fprintf(stderr, " or: %s\n", *usagestr++); 800 fprintf(stderr, " or: %s\n", *usagestr++);
690 while (*usagestr) { 801 while (*usagestr) {
@@ -698,24 +809,23 @@ int parse_options_usage(const char * const *usagestr,
698opt: 809opt:
699 for ( ; opts->type != OPTION_END; opts++) { 810 for ( ; opts->type != OPTION_END; opts++) {
700 if (short_opt) { 811 if (short_opt) {
701 if (opts->short_name == *optstr) 812 if (opts->short_name == *optstr) {
813 print_option_help(opts, 0);
702 break; 814 break;
815 }
703 continue; 816 continue;
704 } 817 }
705 818
706 if (opts->long_name == NULL) 819 if (opts->long_name == NULL)
707 continue; 820 continue;
708 821
709 if (!prefixcmp(optstr, opts->long_name)) 822 if (!prefixcmp(opts->long_name, optstr))
710 break; 823 print_option_help(opts, 0);
711 if (!prefixcmp(optstr, "no-") && 824 if (!prefixcmp("no-", optstr) &&
712 !prefixcmp(optstr + 3, opts->long_name)) 825 !prefixcmp(opts->long_name, optstr + 3))
713 break; 826 print_option_help(opts, 0);
714 } 827 }
715 828
716 if (opts->type != OPTION_END)
717 print_option_help(opts, 0);
718
719 return PARSE_OPT_HELP; 829 return PARSE_OPT_HELP;
720} 830}
721 831
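
For --help output, usage_with_options_internal() now duplicates the option table, sorts the copy by short name (falling back to the long name) and, when a command line is available, prints only the options it mentions. The copy-then-qsort pattern it leans on, sketched on a plain string table rather than struct option (the data here is hypothetical):

#include <stdlib.h>
#include <string.h>

static int cmp_str(const void *va, const void *vb)
{
	return strcmp(*(const char * const *)va, *(const char * const *)vb);
}

/* Sort a copy so the caller's array keeps its declared order, as
 * options__order() does for the real option table. */
static char **sorted_copy(char * const *items, size_t n)
{
	char **dup = malloc(n * sizeof(*dup));

	if (!dup)
		return NULL;
	memcpy(dup, items, n * sizeof(*dup));
	qsort(dup, n, sizeof(*dup), cmp_str);
	return dup;	/* caller frees; the original stays untouched */
}
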
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 367d8b816cc7..a8e407bc251e 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -111,6 +111,7 @@ struct option {
111#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } 111#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
112#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) } 112#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
113#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) } 113#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
114#define OPT_BOOLEAN_FLAG(s, l, v, h, f) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h), .flags = (f) }
114#define OPT_BOOLEAN_SET(s, l, v, os, h) \ 115#define OPT_BOOLEAN_SET(s, l, v, os, h) \
115 { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \ 116 { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \
116 .value = check_vtype(v, bool *), .help = (h), \ 117 .value = check_vtype(v, bool *), .help = (h), \
@@ -160,6 +161,10 @@ extern int parse_options_subcommand(int argc, const char **argv,
160 161
161extern NORETURN void usage_with_options(const char * const *usagestr, 162extern NORETURN void usage_with_options(const char * const *usagestr,
162 const struct option *options); 163 const struct option *options);
164extern NORETURN __attribute__((format(printf,3,4)))
165void usage_with_options_msg(const char * const *usagestr,
166 const struct option *options,
167 const char *fmt, ...);
163 168
164/*----- incremental advanced APIs -----*/ 169
165 170
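
A hedged sketch of a call site for the newly declared usage_with_options_msg(); the command, usage strings and option table below are hypothetical, not taken from the patch:

#include "parse-options.h"

static const char * const demo_usage[] = {
	"perf demo [<options>]",
	NULL
};

/* Report a bad value above the usage text instead of a bare error(). */
static void check_interval(const struct option *demo_options, int ms)
{
	if (ms < 10)
		usage_with_options_msg(demo_usage, demo_options,
				       "interval %d ms is too small", ms);
}
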
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 89c91a1a67e7..e4b173dec4b9 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -626,38 +626,26 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
626 return -1; 626 return -1;
627} 627}
628 628
629static char *formats_error_string(struct list_head *formats) 629static char *pmu_formats_string(struct list_head *formats)
630{ 630{
631 struct perf_pmu_format *format; 631 struct perf_pmu_format *format;
632 char *err, *str; 632 char *str;
633 static const char *static_terms = "config,config1,config2,name," 633 struct strbuf buf;
634 "period,freq,branch_type,time,"
635 "call-graph,stack-size\n";
636 unsigned i = 0; 634 unsigned i = 0;
637 635
638 if (!asprintf(&str, "valid terms:")) 636 if (!formats)
639 return NULL; 637 return NULL;
640 638
639 strbuf_init(&buf, 0);
641 /* sysfs exported terms */ 640 /* sysfs exported terms */
642 list_for_each_entry(format, formats, list) { 641 list_for_each_entry(format, formats, list)
643 char c = i++ ? ',' : ' '; 642 strbuf_addf(&buf, i++ ? ",%s" : "%s",
644 643 format->name);
645 err = str;
646 if (!asprintf(&str, "%s%c%s", err, c, format->name))
647 goto fail;
648 free(err);
649 }
650 644
651 /* static terms */ 645 str = strbuf_detach(&buf, NULL);
652 err = str; 646 strbuf_release(&buf);
653 if (!asprintf(&str, "%s,%s", err, static_terms))
654 goto fail;
655 647
656 free(err);
657 return str; 648 return str;
658fail:
659 free(err);
660 return NULL;
661} 649}
662 650
663/* 651/*
@@ -693,9 +681,12 @@ static int pmu_config_term(struct list_head *formats,
693 if (verbose) 681 if (verbose)
694 printf("Invalid event/parameter '%s'\n", term->config); 682 printf("Invalid event/parameter '%s'\n", term->config);
695 if (err) { 683 if (err) {
684 char *pmu_term = pmu_formats_string(formats);
685
696 err->idx = term->err_term; 686 err->idx = term->err_term;
697 err->str = strdup("unknown term"); 687 err->str = strdup("unknown term");
698 err->help = formats_error_string(formats); 688 err->help = parse_events_formats_error_string(pmu_term);
689 free(pmu_term);
699 } 690 }
700 return -EINVAL; 691 return -EINVAL;
701 } 692 }
@@ -1017,7 +1008,8 @@ void print_pmu_events(const char *event_glob, bool name_only)
1017 goto out_enomem; 1008 goto out_enomem;
1018 j++; 1009 j++;
1019 } 1010 }
1020 if (pmu->selectable) { 1011 if (pmu->selectable &&
1012 (event_glob == NULL || strglobmatch(pmu->name, event_glob))) {
1021 char *s; 1013 char *s;
1022 if (asprintf(&s, "%s//", pmu->name) < 0) 1014 if (asprintf(&s, "%s//", pmu->name) < 0)
1023 goto out_enomem; 1015 goto out_enomem;
@@ -1035,7 +1027,7 @@ void print_pmu_events(const char *event_glob, bool name_only)
1035 printf(" %-50s [Kernel PMU event]\n", aliases[j]); 1027 printf(" %-50s [Kernel PMU event]\n", aliases[j]);
1036 printed++; 1028 printed++;
1037 } 1029 }
1038 if (printed) 1030 if (printed && pager_in_use())
1039 printf("\n"); 1031 printf("\n");
1040out_free: 1032out_free:
1041 for (j = 0; j < len; j++) 1033 for (j = 0; j < len; j++)
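
pmu_formats_string() now assembles the sysfs-exported format names into one comma-separated string that feeds parse_events_formats_error_string() for the "unknown term" help text. The joining step itself, sketched with plain malloc/strcat instead of perf's strbuf:

#include <stdlib.h>
#include <string.h>

/* Join e.g. {"config", "event", "umask"} into "config,event,umask".
 * Caller frees the result. */
static char *join_names(const char * const *names, size_t n)
{
	size_t len = 1, i;
	char *out;

	for (i = 0; i < n; i++)
		len += strlen(names[i]) + 1;	/* name plus ',' or '\0' */
	out = malloc(len);
	if (!out)
		return NULL;
	out[0] = '\0';
	for (i = 0; i < n; i++) {
		strcat(out, names[i]);
		if (i + 1 < n)
			strcat(out, ",");
	}
	return out;
}
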
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index c6f9af78f6f5..b51a8bfb40f9 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -40,8 +40,7 @@
40#include "color.h" 40#include "color.h"
41#include "symbol.h" 41#include "symbol.h"
42#include "thread.h" 42#include "thread.h"
43#include <api/fs/debugfs.h> 43#include <api/fs/fs.h>
44#include <api/fs/tracefs.h>
45#include "trace-event.h" /* For __maybe_unused */ 44#include "trace-event.h" /* For __maybe_unused */
46#include "probe-event.h" 45#include "probe-event.h"
47#include "probe-finder.h" 46#include "probe-finder.h"
@@ -72,7 +71,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
72static struct machine *host_machine; 71static struct machine *host_machine;
73 72
74/* Initialize symbol maps and path of vmlinux/modules */ 73/* Initialize symbol maps and path of vmlinux/modules */
75static int init_symbol_maps(bool user_only) 74int init_probe_symbol_maps(bool user_only)
76{ 75{
77 int ret; 76 int ret;
78 77
@@ -102,7 +101,7 @@ out:
102 return ret; 101 return ret;
103} 102}
104 103
105static void exit_symbol_maps(void) 104void exit_probe_symbol_maps(void)
106{ 105{
107 if (host_machine) { 106 if (host_machine) {
108 machine__delete(host_machine); 107 machine__delete(host_machine);
@@ -127,17 +126,19 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
127{ 126{
128 /* kmap->ref_reloc_sym should be set if host_machine is initialized */ 127 /* kmap->ref_reloc_sym should be set if host_machine is initialized */
129 struct kmap *kmap; 128 struct kmap *kmap;
129 struct map *map = machine__kernel_map(host_machine);
130 130
131 if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0) 131 if (map__load(map, NULL) < 0)
132 return NULL; 132 return NULL;
133 133
134 kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]); 134 kmap = map__kmap(map);
135 if (!kmap) 135 if (!kmap)
136 return NULL; 136 return NULL;
137 return kmap->ref_reloc_sym; 137 return kmap->ref_reloc_sym;
138} 138}
139 139
140static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc) 140static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
141 bool reloc, bool reladdr)
141{ 142{
142 struct ref_reloc_sym *reloc_sym; 143 struct ref_reloc_sym *reloc_sym;
143 struct symbol *sym; 144 struct symbol *sym;
@@ -146,12 +147,14 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
146 /* ref_reloc_sym is just a label. Need a special fix*/ 147 /* ref_reloc_sym is just a label. Need a special fix*/
147 reloc_sym = kernel_get_ref_reloc_sym(); 148 reloc_sym = kernel_get_ref_reloc_sym();
148 if (reloc_sym && strcmp(name, reloc_sym->name) == 0) 149 if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
149 return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; 150 *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
150 else { 151 else {
151 sym = __find_kernel_function_by_name(name, &map); 152 sym = __find_kernel_function_by_name(name, &map);
152 if (sym) 153 if (!sym)
153 return map->unmap_ip(map, sym->start) - 154 return -ENOENT;
154 ((reloc) ? 0 : map->reloc); 155 *addr = map->unmap_ip(map, sym->start) -
156 ((reloc) ? 0 : map->reloc) -
157 ((reladdr) ? map->start : 0);
155 } 158 }
156 return 0; 159 return 0;
157} 160}
@@ -245,12 +248,14 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
245static bool kprobe_blacklist__listed(unsigned long address); 248static bool kprobe_blacklist__listed(unsigned long address);
246static bool kprobe_warn_out_range(const char *symbol, unsigned long address) 249static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
247{ 250{
248 u64 etext_addr; 251 u64 etext_addr = 0;
252 int ret;
249 253
250 /* Get the address of _etext for checking non-probable text symbol */ 254 /* Get the address of _etext for checking non-probable text symbol */
251 etext_addr = kernel_get_symbol_address_by_name("_etext", false); 255 ret = kernel_get_symbol_address_by_name("_etext", &etext_addr,
256 false, false);
252 257
253 if (etext_addr != 0 && etext_addr < address) 258 if (ret == 0 && etext_addr < address)
254 pr_warning("%s is out of .text, skip it.\n", symbol); 259 pr_warning("%s is out of .text, skip it.\n", symbol);
255 else if (kprobe_blacklist__listed(address)) 260 else if (kprobe_blacklist__listed(address))
256 pr_warning("%s is blacklisted function, skip it.\n", symbol); 261 pr_warning("%s is blacklisted function, skip it.\n", symbol);
@@ -282,7 +287,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
282 return -ENOENT; 287 return -ENOENT;
283 } 288 }
284 289
285 map = host_machine->vmlinux_maps[MAP__FUNCTION]; 290 map = machine__kernel_map(host_machine);
286 dso = map->dso; 291 dso = map->dso;
287 292
288 vmlinux_name = symbol_conf.vmlinux_name; 293 vmlinux_name = symbol_conf.vmlinux_name;
@@ -436,19 +441,22 @@ static char *debuginfo_cache_path;
436 441
437static struct debuginfo *debuginfo_cache__open(const char *module, bool silent) 442static struct debuginfo *debuginfo_cache__open(const char *module, bool silent)
438{ 443{
439 if ((debuginfo_cache_path && !strcmp(debuginfo_cache_path, module)) || 444 const char *path = module;
440 (!debuginfo_cache_path && !module && debuginfo_cache)) 445
446 /* If the module is NULL, it should be the kernel. */
447 if (!module)
448 path = "kernel";
449
450 if (debuginfo_cache_path && !strcmp(debuginfo_cache_path, path))
441 goto out; 451 goto out;
442 452
443 /* Copy module path */ 453 /* Copy module path */
444 free(debuginfo_cache_path); 454 free(debuginfo_cache_path);
445 if (module) { 455 debuginfo_cache_path = strdup(path);
446 debuginfo_cache_path = strdup(module); 456 if (!debuginfo_cache_path) {
447 if (!debuginfo_cache_path) { 457 debuginfo__delete(debuginfo_cache);
448 debuginfo__delete(debuginfo_cache); 458 debuginfo_cache = NULL;
449 debuginfo_cache = NULL; 459 goto out;
450 goto out;
451 }
452 } 460 }
453 461
454 debuginfo_cache = open_debuginfo(module, silent); 462 debuginfo_cache = open_debuginfo(module, silent);
@@ -517,8 +525,10 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
517 goto error; 525 goto error;
518 addr += stext; 526 addr += stext;
519 } else if (tp->symbol) { 527 } else if (tp->symbol) {
520 addr = kernel_get_symbol_address_by_name(tp->symbol, false); 528 /* If the module is given, this returns relative address */
521 if (addr == 0) 529 ret = kernel_get_symbol_address_by_name(tp->symbol, &addr,
530 false, !!tp->module);
531 if (ret != 0)
522 goto error; 532 goto error;
523 addr += tp->offset; 533 addr += tp->offset;
524 } 534 }
@@ -861,11 +871,11 @@ int show_line_range(struct line_range *lr, const char *module, bool user)
861{ 871{
862 int ret; 872 int ret;
863 873
864 ret = init_symbol_maps(user); 874 ret = init_probe_symbol_maps(user);
865 if (ret < 0) 875 if (ret < 0)
866 return ret; 876 return ret;
867 ret = __show_line_range(lr, module, user); 877 ret = __show_line_range(lr, module, user);
868 exit_symbol_maps(); 878 exit_probe_symbol_maps();
869 879
870 return ret; 880 return ret;
871} 881}
@@ -943,7 +953,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
943 int i, ret = 0; 953 int i, ret = 0;
944 struct debuginfo *dinfo; 954 struct debuginfo *dinfo;
945 955
946 ret = init_symbol_maps(pevs->uprobes); 956 ret = init_probe_symbol_maps(pevs->uprobes);
947 if (ret < 0) 957 if (ret < 0)
948 return ret; 958 return ret;
949 959
@@ -960,7 +970,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
960 970
961 debuginfo__delete(dinfo); 971 debuginfo__delete(dinfo);
962out: 972out:
963 exit_symbol_maps(); 973 exit_probe_symbol_maps();
964 return ret; 974 return ret;
965} 975}
966 976
@@ -1884,8 +1894,12 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
1884 goto out; 1894 goto out;
1885 sym = map__find_symbol(map, addr, NULL); 1895 sym = map__find_symbol(map, addr, NULL);
1886 } else { 1896 } else {
1887 if (tp->symbol) 1897 if (tp->symbol && !addr) {
1888 addr = kernel_get_symbol_address_by_name(tp->symbol, true); 1898 ret = kernel_get_symbol_address_by_name(tp->symbol,
1899 &addr, true, false);
1900 if (ret < 0)
1901 goto out;
1902 }
1889 if (addr) { 1903 if (addr) {
1890 addr += tp->offset; 1904 addr += tp->offset;
1891 sym = __find_kernel_function(addr, &map); 1905 sym = __find_kernel_function(addr, &map);
@@ -2055,7 +2069,7 @@ static void kprobe_blacklist__delete(struct list_head *blacklist)
2055static int kprobe_blacklist__load(struct list_head *blacklist) 2069static int kprobe_blacklist__load(struct list_head *blacklist)
2056{ 2070{
2057 struct kprobe_blacklist_node *node; 2071 struct kprobe_blacklist_node *node;
2058 const char *__debugfs = debugfs_find_mountpoint(); 2072 const char *__debugfs = debugfs__mountpoint();
2059 char buf[PATH_MAX], *p; 2073 char buf[PATH_MAX], *p;
2060 FILE *fp; 2074 FILE *fp;
2061 int ret; 2075 int ret;
@@ -2181,9 +2195,9 @@ out:
2181} 2195}
2182 2196
2183/* Show an event */ 2197/* Show an event */
2184static int show_perf_probe_event(const char *group, const char *event, 2198int show_perf_probe_event(const char *group, const char *event,
2185 struct perf_probe_event *pev, 2199 struct perf_probe_event *pev,
2186 const char *module, bool use_stdout) 2200 const char *module, bool use_stdout)
2187{ 2201{
2188 struct strbuf buf = STRBUF_INIT; 2202 struct strbuf buf = STRBUF_INIT;
2189 int ret; 2203 int ret;
@@ -2264,7 +2278,7 @@ int show_perf_probe_events(struct strfilter *filter)
2264 2278
2265 setup_pager(); 2279 setup_pager();
2266 2280
2267 ret = init_symbol_maps(false); 2281 ret = init_probe_symbol_maps(false);
2268 if (ret < 0) 2282 if (ret < 0)
2269 return ret; 2283 return ret;
2270 2284
@@ -2280,7 +2294,7 @@ int show_perf_probe_events(struct strfilter *filter)
2280 close(kp_fd); 2294 close(kp_fd);
2281 if (up_fd > 0) 2295 if (up_fd > 0)
2282 close(up_fd); 2296 close(up_fd);
2283 exit_symbol_maps(); 2297 exit_probe_symbol_maps();
2284 2298
2285 return ret; 2299 return ret;
2286} 2300}
@@ -2289,36 +2303,41 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
2289 struct strlist *namelist, bool allow_suffix) 2303 struct strlist *namelist, bool allow_suffix)
2290{ 2304{
2291 int i, ret; 2305 int i, ret;
2292 char *p; 2306 char *p, *nbase;
2293 2307
2294 if (*base == '.') 2308 if (*base == '.')
2295 base++; 2309 base++;
2310 nbase = strdup(base);
2311 if (!nbase)
2312 return -ENOMEM;
2313
2314 /* Cut off the dot suffixes (e.g. .const, .isra)*/
2315 p = strchr(nbase, '.');
2316 if (p && p != nbase)
2317 *p = '\0';
2296 2318
2297 /* Try no suffix */ 2319 /* Try no suffix number */
2298 ret = e_snprintf(buf, len, "%s", base); 2320 ret = e_snprintf(buf, len, "%s", nbase);
2299 if (ret < 0) { 2321 if (ret < 0) {
2300 pr_debug("snprintf() failed: %d\n", ret); 2322 pr_debug("snprintf() failed: %d\n", ret);
2301 return ret; 2323 goto out;
2302 } 2324 }
2303 /* Cut off the postfixes (e.g. .const, .isra)*/
2304 p = strchr(buf, '.');
2305 if (p && p != buf)
2306 *p = '\0';
2307 if (!strlist__has_entry(namelist, buf)) 2325 if (!strlist__has_entry(namelist, buf))
2308 return 0; 2326 goto out;
2309 2327
2310 if (!allow_suffix) { 2328 if (!allow_suffix) {
2311 pr_warning("Error: event \"%s\" already exists. " 2329 pr_warning("Error: event \"%s\" already exists. "
2312 "(Use -f to force duplicates.)\n", base); 2330 "(Use -f to force duplicates.)\n", buf);
2313 return -EEXIST; 2331 ret = -EEXIST;
2332 goto out;
2314 } 2333 }
2315 2334
2316 /* Try to add suffix */ 2335 /* Try to add suffix */
2317 for (i = 1; i < MAX_EVENT_INDEX; i++) { 2336 for (i = 1; i < MAX_EVENT_INDEX; i++) {
2318 ret = e_snprintf(buf, len, "%s_%d", base, i); 2337 ret = e_snprintf(buf, len, "%s_%d", nbase, i);
2319 if (ret < 0) { 2338 if (ret < 0) {
2320 pr_debug("snprintf() failed: %d\n", ret); 2339 pr_debug("snprintf() failed: %d\n", ret);
2321 return ret; 2340 goto out;
2322 } 2341 }
2323 if (!strlist__has_entry(namelist, buf)) 2342 if (!strlist__has_entry(namelist, buf))
2324 break; 2343 break;
@@ -2328,6 +2347,8 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
2328 ret = -ERANGE; 2347 ret = -ERANGE;
2329 } 2348 }
2330 2349
2350out:
2351 free(nbase);
2331 return ret; 2352 return ret;
2332} 2353}
2333 2354
@@ -2400,7 +2421,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
2400{ 2421{
2401 int i, fd, ret; 2422 int i, fd, ret;
2402 struct probe_trace_event *tev = NULL; 2423 struct probe_trace_event *tev = NULL;
2403 const char *event = NULL, *group = NULL;
2404 struct strlist *namelist; 2424 struct strlist *namelist;
2405 2425
2406 fd = probe_file__open(PF_FL_RW | (pev->uprobes ? PF_FL_UPROBE : 0)); 2426 fd = probe_file__open(PF_FL_RW | (pev->uprobes ? PF_FL_UPROBE : 0));
@@ -2416,7 +2436,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
2416 } 2436 }
2417 2437
2418 ret = 0; 2438 ret = 0;
2419 pr_info("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
2420 for (i = 0; i < ntevs; i++) { 2439 for (i = 0; i < ntevs; i++) {
2421 tev = &tevs[i]; 2440 tev = &tevs[i];
2422 /* Skip if the symbol is out of .text or blacklisted */ 2441 /* Skip if the symbol is out of .text or blacklisted */
@@ -2433,13 +2452,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
2433 if (ret < 0) 2452 if (ret < 0)
2434 break; 2453 break;
2435 2454
2436 /* We use tev's name for showing new events */
2437 show_perf_probe_event(tev->group, tev->event, pev,
2438 tev->point.module, false);
2439 /* Save the last valid name */
2440 event = tev->event;
2441 group = tev->group;
2442
2443 /* 2455 /*
2444 * Probes after the first probe which comes from same 2456 * Probes after the first probe which comes from same
2445 * user input are always allowed to add suffix, because 2457 * user input are always allowed to add suffix, because
@@ -2451,13 +2463,6 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
2451 if (ret == -EINVAL && pev->uprobes) 2463 if (ret == -EINVAL && pev->uprobes)
2452 warn_uprobe_event_compat(tev); 2464 warn_uprobe_event_compat(tev);
2453 2465
2454 /* Note that it is possible to skip all events because of blacklist */
2455 if (ret >= 0 && event) {
2456 /* Show how to use the event. */
2457 pr_info("\nYou can now use it in all perf tools, such as:\n\n");
2458 pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event);
2459 }
2460
2461 strlist__delete(namelist); 2466 strlist__delete(namelist);
2462close_out: 2467close_out:
2463 close(fd); 2468 close(fd);
@@ -2538,7 +2543,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
2538 goto out; 2543 goto out;
2539 } 2544 }
2540 2545
2541 if (!pev->uprobes && !pp->retprobe) { 2546 /* Note that the symbols in the kmodule are not relocated */
2547 if (!pev->uprobes && !pp->retprobe && !pev->target) {
2542 reloc_sym = kernel_get_ref_reloc_sym(); 2548 reloc_sym = kernel_get_ref_reloc_sym();
2543 if (!reloc_sym) { 2549 if (!reloc_sym) {
2544 pr_warning("Relocated base symbol is not found!\n"); 2550 pr_warning("Relocated base symbol is not found!\n");
@@ -2575,8 +2581,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
2575 } 2581 }
2576 /* Add one probe point */ 2582 /* Add one probe point */
2577 tp->address = map->unmap_ip(map, sym->start) + pp->offset; 2583 tp->address = map->unmap_ip(map, sym->start) + pp->offset;
2578 /* If we found a wrong one, mark it by NULL symbol */ 2584
2579 if (!pev->uprobes && 2585 /* Check the kprobe (not in module) is within .text */
2586 if (!pev->uprobes && !pev->target &&
2580 kprobe_warn_out_range(sym->name, tp->address)) { 2587 kprobe_warn_out_range(sym->name, tp->address)) {
2581 tp->symbol = NULL; /* Skip it */ 2588 tp->symbol = NULL; /* Skip it */
2582 skipped++; 2589 skipped++;
@@ -2760,63 +2767,71 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
2760 return find_probe_trace_events_from_map(pev, tevs); 2767 return find_probe_trace_events_from_map(pev, tevs);
2761} 2768}
2762 2769
2763struct __event_package { 2770int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs)
2764 struct perf_probe_event *pev;
2765 struct probe_trace_event *tevs;
2766 int ntevs;
2767};
2768
2769int add_perf_probe_events(struct perf_probe_event *pevs, int npevs)
2770{ 2771{
2771 int i, j, ret; 2772 int i, ret;
2772 struct __event_package *pkgs;
2773
2774 ret = 0;
2775 pkgs = zalloc(sizeof(struct __event_package) * npevs);
2776
2777 if (pkgs == NULL)
2778 return -ENOMEM;
2779
2780 ret = init_symbol_maps(pevs->uprobes);
2781 if (ret < 0) {
2782 free(pkgs);
2783 return ret;
2784 }
2785 2773
2786 /* Loop 1: convert all events */ 2774 /* Loop 1: convert all events */
2787 for (i = 0; i < npevs; i++) { 2775 for (i = 0; i < npevs; i++) {
2788 pkgs[i].pev = &pevs[i];
2789 /* Init kprobe blacklist if needed */ 2776 /* Init kprobe blacklist if needed */
2790 if (!pkgs[i].pev->uprobes) 2777 if (!pevs[i].uprobes)
2791 kprobe_blacklist__init(); 2778 kprobe_blacklist__init();
2792 /* Convert with or without debuginfo */ 2779 /* Convert with or without debuginfo */
2793 ret = convert_to_probe_trace_events(pkgs[i].pev, 2780 ret = convert_to_probe_trace_events(&pevs[i], &pevs[i].tevs);
2794 &pkgs[i].tevs);
2795 if (ret < 0) 2781 if (ret < 0)
2796 goto end; 2782 return ret;
2797 pkgs[i].ntevs = ret; 2783 pevs[i].ntevs = ret;
2798 } 2784 }
2799 /* This just release blacklist only if allocated */ 2785 /* This just release blacklist only if allocated */
2800 kprobe_blacklist__release(); 2786 kprobe_blacklist__release();
2801 2787
2788 return 0;
2789}
2790
2791int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs)
2792{
2793 int i, ret = 0;
2794
2802 /* Loop 2: add all events */ 2795 /* Loop 2: add all events */
2803 for (i = 0; i < npevs; i++) { 2796 for (i = 0; i < npevs; i++) {
2804 ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, 2797 ret = __add_probe_trace_events(&pevs[i], pevs[i].tevs,
2805 pkgs[i].ntevs, 2798 pevs[i].ntevs,
2806 probe_conf.force_add); 2799 probe_conf.force_add);
2807 if (ret < 0) 2800 if (ret < 0)
2808 break; 2801 break;
2809 } 2802 }
2810end: 2803 return ret;
2804}
2805
2806void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs)
2807{
2808 int i, j;
2809
2811 /* Loop 3: cleanup and free trace events */ 2810 /* Loop 3: cleanup and free trace events */
2812 for (i = 0; i < npevs; i++) { 2811 for (i = 0; i < npevs; i++) {
2813 for (j = 0; j < pkgs[i].ntevs; j++) 2812 for (j = 0; j < pevs[i].ntevs; j++)
2814 clear_probe_trace_event(&pkgs[i].tevs[j]); 2813 clear_probe_trace_event(&pevs[i].tevs[j]);
2815 zfree(&pkgs[i].tevs); 2814 zfree(&pevs[i].tevs);
2815 pevs[i].ntevs = 0;
2816 clear_perf_probe_event(&pevs[i]);
2816 } 2817 }
2817 free(pkgs); 2818}
2818 exit_symbol_maps();
2819 2819
2820int add_perf_probe_events(struct perf_probe_event *pevs, int npevs)
2821{
2822 int ret;
2823
2824 ret = init_probe_symbol_maps(pevs->uprobes);
2825 if (ret < 0)
2826 return ret;
2827
2828 ret = convert_perf_probe_events(pevs, npevs);
2829 if (ret == 0)
2830 ret = apply_perf_probe_events(pevs, npevs);
2831
2832 cleanup_perf_probe_events(pevs, npevs);
2833
2834 exit_probe_symbol_maps();
2820 return ret; 2835 return ret;
2821} 2836}
2822 2837
@@ -2828,8 +2843,6 @@ int del_perf_probe_events(struct strfilter *filter)
2828 if (!str) 2843 if (!str)
2829 return -EINVAL; 2844 return -EINVAL;
2830 2845
2831 pr_debug("Delete filter: \'%s\'\n", str);
2832
2833 /* Get current event names */ 2846 /* Get current event names */
2834 ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW); 2847 ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
2835 if (ret < 0) 2848 if (ret < 0)
@@ -2844,9 +2857,6 @@ int del_perf_probe_events(struct strfilter *filter)
2844 ret = ret2; 2857 ret = ret2;
2845 goto error; 2858 goto error;
2846 } 2859 }
2847 if (ret == -ENOENT && ret2 == -ENOENT)
2848 pr_debug("\"%s\" does not hit any event.\n", str);
2849 /* Note that this is silently ignored */
2850 ret = 0; 2860 ret = 0;
2851 2861
2852error: 2862error:
@@ -2881,7 +2891,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
2881 struct map *map; 2891 struct map *map;
2882 int ret; 2892 int ret;
2883 2893
2884 ret = init_symbol_maps(user); 2894 ret = init_probe_symbol_maps(user);
2885 if (ret < 0) 2895 if (ret < 0)
2886 return ret; 2896 return ret;
2887 2897
@@ -2911,7 +2921,7 @@ end:
2911 if (user) { 2921 if (user) {
2912 map__put(map); 2922 map__put(map);
2913 } 2923 }
2914 exit_symbol_maps(); 2924 exit_probe_symbol_maps();
2915 2925
2916 return ret; 2926 return ret;
2917} 2927}
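
add_perf_probe_events() is split into convert, apply and cleanup stages, and the converted trace events now live on each perf_probe_event (tevs/ntevs). A hedged example of a caller using the split API to look at the conversion result before writing anything; count_then_add is an illustrative name, not a function this patch adds:

#include "probe-event.h"

static int count_then_add(struct perf_probe_event *pevs, int npevs)
{
	int i, total = 0, ret;

	ret = init_probe_symbol_maps(pevs->uprobes);
	if (ret < 0)
		return ret;

	ret = convert_perf_probe_events(pevs, npevs);
	if (ret == 0) {
		for (i = 0; i < npevs; i++)
			total += pevs[i].ntevs;	/* filled in by conversion */
		ret = apply_perf_probe_events(pevs, npevs);
	}

	cleanup_perf_probe_events(pevs, npevs);
	exit_probe_symbol_maps();
	return ret < 0 ? ret : total;
}
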
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 6e7ec68a4aa8..ba926c30f8cd 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -87,6 +87,8 @@ struct perf_probe_event {
87 bool uprobes; /* Uprobe event flag */ 87 bool uprobes; /* Uprobe event flag */
88 char *target; /* Target binary */ 88 char *target; /* Target binary */
89 struct perf_probe_arg *args; /* Arguments */ 89 struct perf_probe_arg *args; /* Arguments */
90 struct probe_trace_event *tevs;
91 int ntevs;
90}; 92};
91 93
92/* Line range */ 94/* Line range */
@@ -108,6 +110,8 @@ struct variable_list {
108}; 110};
109 111
110struct map; 112struct map;
113int init_probe_symbol_maps(bool user_only);
114void exit_probe_symbol_maps(void);
111 115
112/* Command string to events */ 116/* Command string to events */
113extern int parse_perf_probe_command(const char *cmd, 117extern int parse_perf_probe_command(const char *cmd,
@@ -138,7 +142,14 @@ extern void line_range__clear(struct line_range *lr);
138extern int line_range__init(struct line_range *lr); 142extern int line_range__init(struct line_range *lr);
139 143
140extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs); 144extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
145extern int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs);
146extern int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs);
147extern void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs);
141extern int del_perf_probe_events(struct strfilter *filter); 148extern int del_perf_probe_events(struct strfilter *filter);
149
150extern int show_perf_probe_event(const char *group, const char *event,
151 struct perf_probe_event *pev,
152 const char *module, bool use_stdout);
142extern int show_perf_probe_events(struct strfilter *filter); 153extern int show_perf_probe_events(struct strfilter *filter);
143extern int show_line_range(struct line_range *lr, const char *module, 154extern int show_line_range(struct line_range *lr, const char *module,
144 bool user); 155 bool user);
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index bbb243717ec8..89dbeb92c68e 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -22,8 +22,7 @@
22#include "color.h" 22#include "color.h"
23#include "symbol.h" 23#include "symbol.h"
24#include "thread.h" 24#include "thread.h"
25#include <api/fs/debugfs.h> 25#include <api/fs/tracing_path.h>
26#include <api/fs/tracefs.h>
27#include "probe-event.h" 26#include "probe-event.h"
28#include "probe-file.h" 27#include "probe-file.h"
29#include "session.h" 28#include "session.h"
@@ -73,21 +72,11 @@ static void print_both_open_warning(int kerr, int uerr)
73static int open_probe_events(const char *trace_file, bool readwrite) 72static int open_probe_events(const char *trace_file, bool readwrite)
74{ 73{
75 char buf[PATH_MAX]; 74 char buf[PATH_MAX];
76 const char *__debugfs;
77 const char *tracing_dir = ""; 75 const char *tracing_dir = "";
78 int ret; 76 int ret;
79 77
80 __debugfs = tracefs_find_mountpoint();
81 if (__debugfs == NULL) {
82 tracing_dir = "tracing/";
83
84 __debugfs = debugfs_find_mountpoint();
85 if (__debugfs == NULL)
86 return -ENOTSUP;
87 }
88
89 ret = e_snprintf(buf, PATH_MAX, "%s/%s%s", 78 ret = e_snprintf(buf, PATH_MAX, "%s/%s%s",
90 __debugfs, tracing_dir, trace_file); 79 tracing_path, tracing_dir, trace_file);
91 if (ret >= 0) { 80 if (ret >= 0) {
92 pr_debug("Opening %s write=%d\n", buf, readwrite); 81 pr_debug("Opening %s write=%d\n", buf, readwrite);
93 if (readwrite && !probe_event_dry_run) 82 if (readwrite && !probe_event_dry_run)
@@ -267,7 +256,6 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
267 goto error; 256 goto error;
268 } 257 }
269 258
270 pr_info("Removed event: %s\n", ent->s);
271 return 0; 259 return 0;
272error: 260error:
273 pr_warning("Failed to delete event: %s\n", 261 pr_warning("Failed to delete event: %s\n",
@@ -275,7 +263,8 @@ error:
275 return ret; 263 return ret;
276} 264}
277 265
278int probe_file__del_events(int fd, struct strfilter *filter) 266int probe_file__get_events(int fd, struct strfilter *filter,
267 struct strlist *plist)
279{ 268{
280 struct strlist *namelist; 269 struct strlist *namelist;
281 struct str_node *ent; 270 struct str_node *ent;
@@ -290,12 +279,43 @@ int probe_file__del_events(int fd, struct strfilter *filter)
290 p = strchr(ent->s, ':'); 279 p = strchr(ent->s, ':');
291 if ((p && strfilter__compare(filter, p + 1)) || 280 if ((p && strfilter__compare(filter, p + 1)) ||
292 strfilter__compare(filter, ent->s)) { 281 strfilter__compare(filter, ent->s)) {
293 ret = __del_trace_probe_event(fd, ent); 282 strlist__add(plist, ent->s);
294 if (ret < 0) 283 ret = 0;
295 break;
296 } 284 }
297 } 285 }
298 strlist__delete(namelist); 286 strlist__delete(namelist);
299 287
300 return ret; 288 return ret;
301} 289}
290
291int probe_file__del_strlist(int fd, struct strlist *namelist)
292{
293 int ret = 0;
294 struct str_node *ent;
295
296 strlist__for_each(ent, namelist) {
297 ret = __del_trace_probe_event(fd, ent);
298 if (ret < 0)
299 break;
300 }
301 return ret;
302}
303
304int probe_file__del_events(int fd, struct strfilter *filter)
305{
306 struct strlist *namelist;
307 int ret;
308
309 namelist = strlist__new(NULL, NULL);
310 if (!namelist)
311 return -ENOMEM;
312
313 ret = probe_file__get_events(fd, filter, namelist);
314 if (ret < 0)
315 return ret;
316
317 ret = probe_file__del_strlist(fd, namelist);
318 strlist__delete(namelist);
319
320 return ret;
321}
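
probe_file__del_events() is now composed of probe_file__get_events() plus probe_file__del_strlist(), so a caller can also collect the matching probe events without deleting them. A sketch of such a listing-only caller; list_matching_events is illustrative, and fd and filter are assumed to come from probe_file__open() and the user's filter string:

#include <errno.h>
#include <stdio.h>
#include "probe-file.h"
#include "strfilter.h"
#include "strlist.h"

static int list_matching_events(int fd, struct strfilter *filter)
{
	struct strlist *names = strlist__new(NULL, NULL);
	struct str_node *ent;
	int ret;

	if (!names)
		return -ENOMEM;

	ret = probe_file__get_events(fd, filter, names);
	if (ret == 0)
		strlist__for_each(ent, names)
			printf("%s\n", ent->s);

	strlist__delete(names);
	return ret;
}
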
diff --git a/tools/perf/util/probe-file.h b/tools/perf/util/probe-file.h
index ada94a242a17..18ac9cf51c34 100644
--- a/tools/perf/util/probe-file.h
+++ b/tools/perf/util/probe-file.h
@@ -14,5 +14,9 @@ struct strlist *probe_file__get_namelist(int fd);
14struct strlist *probe_file__get_rawlist(int fd); 14struct strlist *probe_file__get_rawlist(int fd);
15int probe_file__add_event(int fd, struct probe_trace_event *tev); 15int probe_file__add_event(int fd, struct probe_trace_event *tev);
16int probe_file__del_events(int fd, struct strfilter *filter); 16int probe_file__del_events(int fd, struct strfilter *filter);
17int probe_file__get_events(int fd, struct strfilter *filter,
18 struct strlist *plist);
19int probe_file__del_strlist(int fd, struct strlist *namelist);
20
17 21
18#endif 22#endif
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 29c43c0680a8..bd8f03de5e40 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -70,6 +70,7 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
70 if (!dbg->dwfl) 70 if (!dbg->dwfl)
71 goto error; 71 goto error;
72 72
73 dwfl_report_begin(dbg->dwfl);
73 dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd); 74 dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
74 if (!dbg->mod) 75 if (!dbg->mod)
75 goto error; 76 goto error;
@@ -78,6 +79,8 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
78 if (!dbg->dbg) 79 if (!dbg->dbg)
79 goto error; 80 goto error;
80 81
82 dwfl_report_end(dbg->dwfl, NULL, NULL);
83
81 return 0; 84 return 0;
82error: 85error:
83 if (dbg->dwfl) 86 if (dbg->dwfl)
@@ -591,6 +594,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
591/* Convert subprogram DIE to trace point */ 594/* Convert subprogram DIE to trace point */
592static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, 595static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
593 Dwarf_Addr paddr, bool retprobe, 596 Dwarf_Addr paddr, bool retprobe,
597 const char *function,
594 struct probe_trace_point *tp) 598 struct probe_trace_point *tp)
595{ 599{
596 Dwarf_Addr eaddr, highaddr; 600 Dwarf_Addr eaddr, highaddr;
@@ -634,8 +638,10 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
634 /* Return probe must be on the head of a subprogram */ 638 /* Return probe must be on the head of a subprogram */
635 if (retprobe) { 639 if (retprobe) {
636 if (eaddr != paddr) { 640 if (eaddr != paddr) {
637 pr_warning("Return probe must be on the head of" 641 pr_warning("Failed to find \"%s%%return\",\n"
638 " a real function.\n"); 642 " because %s is an inlined function and"
643 " has no return point.\n", function,
644 function);
639 return -EINVAL; 645 return -EINVAL;
640 } 646 }
641 tp->retprobe = true; 647 tp->retprobe = true;
@@ -1175,6 +1181,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1175{ 1181{
1176 struct trace_event_finder *tf = 1182 struct trace_event_finder *tf =
1177 container_of(pf, struct trace_event_finder, pf); 1183 container_of(pf, struct trace_event_finder, pf);
1184 struct perf_probe_point *pp = &pf->pev->point;
1178 struct probe_trace_event *tev; 1185 struct probe_trace_event *tev;
1179 struct perf_probe_arg *args; 1186 struct perf_probe_arg *args;
1180 int ret, i; 1187 int ret, i;
@@ -1189,7 +1196,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1189 1196
1190 /* Trace point should be converted from subprogram DIE */ 1197 /* Trace point should be converted from subprogram DIE */
1191 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, 1198 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
1192 pf->pev->point.retprobe, &tev->point); 1199 pp->retprobe, pp->function, &tev->point);
1193 if (ret < 0) 1200 if (ret < 0)
1194 return ret; 1201 return ret;
1195 1202
@@ -1319,6 +1326,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
1319{ 1326{
1320 struct available_var_finder *af = 1327 struct available_var_finder *af =
1321 container_of(pf, struct available_var_finder, pf); 1328 container_of(pf, struct available_var_finder, pf);
1329 struct perf_probe_point *pp = &pf->pev->point;
1322 struct variable_list *vl; 1330 struct variable_list *vl;
1323 Dwarf_Die die_mem; 1331 Dwarf_Die die_mem;
1324 int ret; 1332 int ret;
@@ -1332,7 +1340,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
1332 1340
1333 /* Trace point should be converted from subprogram DIE */ 1341 /* Trace point should be converted from subprogram DIE */
1334 ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr, 1342 ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
1335 pf->pev->point.retprobe, &vl->point); 1343 pp->retprobe, pp->function, &vl->point);
1336 if (ret < 0) 1344 if (ret < 0)
1337 return ret; 1345 return ret;
1338 1346
@@ -1399,6 +1407,41 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
1399 return (ret < 0) ? ret : af.nvls; 1407 return (ret < 0) ? ret : af.nvls;
1400} 1408}
1401 1409
1410/* For the kernel module, we need a special code to get a DIE */
1411static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
1412{
1413 int n, i;
1414 Elf32_Word shndx;
1415 Elf_Scn *scn;
1416 Elf *elf;
1417 GElf_Shdr mem, *shdr;
1418 const char *p;
1419
1420 elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
1421 if (!elf)
1422 return -EINVAL;
1423
1424 /* Get the number of relocations */
1425 n = dwfl_module_relocations(dbg->mod);
1426 if (n < 0)
1427 return -ENOENT;
1428 /* Search the relocation related .text section */
1429 for (i = 0; i < n; i++) {
1430 p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
1431 if (strcmp(p, ".text") == 0) {
1432 /* OK, get the section header */
1433 scn = elf_getscn(elf, shndx);
1434 if (!scn)
1435 return -ENOENT;
1436 shdr = gelf_getshdr(scn, &mem);
1437 if (!shdr)
1438 return -ENOENT;
1439 *offs = shdr->sh_addr;
1440 }
1441 }
1442 return 0;
1443}
1444
1402/* Reverse search */ 1445/* Reverse search */
1403int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, 1446int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
1404 struct perf_probe_point *ppt) 1447 struct perf_probe_point *ppt)
@@ -1407,9 +1450,16 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
1407 Dwarf_Addr _addr = 0, baseaddr = 0; 1450 Dwarf_Addr _addr = 0, baseaddr = 0;
1408 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; 1451 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
1409 int baseline = 0, lineno = 0, ret = 0; 1452 int baseline = 0, lineno = 0, ret = 0;
1453 bool reloc = false;
1410 1454
1455retry:
1411 /* Find cu die */ 1456 /* Find cu die */
1412 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { 1457 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
1458 if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
1459 addr += baseaddr;
1460 reloc = true;
1461 goto retry;
1462 }
1413 pr_warning("Failed to find debug information for address %lx\n", 1463 pr_warning("Failed to find debug information for address %lx\n",
1414 addr); 1464 addr);
1415 ret = -EINVAL; 1465 ret = -EINVAL;
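
debuginfo__get_text_offset() walks the dwfl module's relocation sections to pick up the .text sh_addr, so addresses inside a kernel module can be retried with that offset when the first dwarf_addrdie() lookup misses. For orientation, a standalone libelf sketch that reads the same section-header field straight from a file; this is an independent illustration (link with -lelf), not the dwfl-based path the patch adds:

#include <fcntl.h>
#include <gelf.h>
#include <string.h>
#include <unistd.h>

static int text_section_addr(const char *path, GElf_Addr *addr)
{
	int fd, ret = -1;
	Elf *elf = NULL;
	Elf_Scn *scn = NULL;
	GElf_Shdr shdr;
	size_t shstrndx;

	elf_version(EV_CURRENT);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf || elf_getshdrstrndx(elf, &shstrndx))
		goto out;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *name;

		if (!gelf_getshdr(scn, &shdr))
			continue;
		name = elf_strptr(elf, shstrndx, shdr.sh_name);
		if (name && strcmp(name, ".text") == 0) {
			*addr = shdr.sh_addr;	/* the offset the retry adds */
			ret = 0;
			break;
		}
	}
out:
	if (elf)
		elf_end(elf);
	close(fd);
	return ret;
}
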
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 6324fe6b161e..98f127abfa42 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -67,6 +67,7 @@ static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
67static PyMemberDef pyrf_mmap_event__members[] = { 67static PyMemberDef pyrf_mmap_event__members[] = {
68 sample_members 68 sample_members
69 member_def(perf_event_header, type, T_UINT, "event type"), 69 member_def(perf_event_header, type, T_UINT, "event type"),
70 member_def(perf_event_header, misc, T_UINT, "event misc"),
70 member_def(mmap_event, pid, T_UINT, "event pid"), 71 member_def(mmap_event, pid, T_UINT, "event pid"),
71 member_def(mmap_event, tid, T_UINT, "event tid"), 72 member_def(mmap_event, tid, T_UINT, "event tid"),
72 member_def(mmap_event, start, T_ULONGLONG, "start of the map"), 73 member_def(mmap_event, start, T_ULONGLONG, "start of the map"),
@@ -297,6 +298,43 @@ static PyTypeObject pyrf_sample_event__type = {
297 .tp_repr = (reprfunc)pyrf_sample_event__repr, 298 .tp_repr = (reprfunc)pyrf_sample_event__repr,
298}; 299};
299 300
301static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
302
303static PyMemberDef pyrf_context_switch_event__members[] = {
304 sample_members
305 member_def(perf_event_header, type, T_UINT, "event type"),
306 member_def(context_switch_event, next_prev_pid, T_UINT, "next/prev pid"),
307 member_def(context_switch_event, next_prev_tid, T_UINT, "next/prev tid"),
308 { .name = NULL, },
309};
310
311static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
312{
313 PyObject *ret;
314 char *s;
315
316 if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
317 pevent->event.context_switch.next_prev_pid,
318 pevent->event.context_switch.next_prev_tid,
319 !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
320 ret = PyErr_NoMemory();
321 } else {
322 ret = PyString_FromString(s);
323 free(s);
324 }
325 return ret;
326}
327
328static PyTypeObject pyrf_context_switch_event__type = {
329 PyVarObject_HEAD_INIT(NULL, 0)
330 .tp_name = "perf.context_switch_event",
331 .tp_basicsize = sizeof(struct pyrf_event),
332 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
333 .tp_doc = pyrf_context_switch_event__doc,
334 .tp_members = pyrf_context_switch_event__members,
335 .tp_repr = (reprfunc)pyrf_context_switch_event__repr,
336};
337
300static int pyrf_event__setup_types(void) 338static int pyrf_event__setup_types(void)
301{ 339{
302 int err; 340 int err;
@@ -306,6 +344,7 @@ static int pyrf_event__setup_types(void)
306 pyrf_lost_event__type.tp_new = 344 pyrf_lost_event__type.tp_new =
307 pyrf_read_event__type.tp_new = 345 pyrf_read_event__type.tp_new =
308 pyrf_sample_event__type.tp_new = 346 pyrf_sample_event__type.tp_new =
347 pyrf_context_switch_event__type.tp_new =
309 pyrf_throttle_event__type.tp_new = PyType_GenericNew; 348 pyrf_throttle_event__type.tp_new = PyType_GenericNew;
310 err = PyType_Ready(&pyrf_mmap_event__type); 349 err = PyType_Ready(&pyrf_mmap_event__type);
311 if (err < 0) 350 if (err < 0)
@@ -328,6 +367,9 @@ static int pyrf_event__setup_types(void)
328 err = PyType_Ready(&pyrf_sample_event__type); 367 err = PyType_Ready(&pyrf_sample_event__type);
329 if (err < 0) 368 if (err < 0)
330 goto out; 369 goto out;
370 err = PyType_Ready(&pyrf_context_switch_event__type);
371 if (err < 0)
372 goto out;
331out: 373out:
332 return err; 374 return err;
333} 375}
@@ -342,6 +384,8 @@ static PyTypeObject *pyrf_event__type[] = {
342 [PERF_RECORD_FORK] = &pyrf_task_event__type, 384 [PERF_RECORD_FORK] = &pyrf_task_event__type,
343 [PERF_RECORD_READ] = &pyrf_read_event__type, 385 [PERF_RECORD_READ] = &pyrf_read_event__type,
344 [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type, 386 [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
387 [PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
388 [PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
345}; 389};
346 390
347static PyObject *pyrf_event__new(union perf_event *event) 391static PyObject *pyrf_event__new(union perf_event *event)
@@ -349,8 +393,10 @@ static PyObject *pyrf_event__new(union perf_event *event)
349 struct pyrf_event *pevent; 393 struct pyrf_event *pevent;
350 PyTypeObject *ptype; 394 PyTypeObject *ptype;
351 395
352 if (event->header.type < PERF_RECORD_MMAP || 396 if ((event->header.type < PERF_RECORD_MMAP ||
353 event->header.type > PERF_RECORD_SAMPLE) 397 event->header.type > PERF_RECORD_SAMPLE) &&
398 !(event->header.type == PERF_RECORD_SWITCH ||
399 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
354 return NULL; 400 return NULL;
355 401
356 ptype = pyrf_event__type[event->header.type]; 402 ptype = pyrf_event__type[event->header.type];
@@ -528,6 +574,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
528 "exclude_hv", 574 "exclude_hv",
529 "exclude_idle", 575 "exclude_idle",
530 "mmap", 576 "mmap",
577 "context_switch",
531 "comm", 578 "comm",
532 "freq", 579 "freq",
533 "inherit_stat", 580 "inherit_stat",
@@ -553,6 +600,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
553 exclude_hv = 0, 600 exclude_hv = 0,
554 exclude_idle = 0, 601 exclude_idle = 0,
555 mmap = 0, 602 mmap = 0,
603 context_switch = 0,
556 comm = 0, 604 comm = 0,
557 freq = 1, 605 freq = 1,
558 inherit_stat = 0, 606 inherit_stat = 0,
@@ -565,13 +613,13 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
565 int idx = 0; 613 int idx = 0;
566 614
567 if (!PyArg_ParseTupleAndKeywords(args, kwargs, 615 if (!PyArg_ParseTupleAndKeywords(args, kwargs,
568 "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist, 616 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
569 &attr.type, &attr.config, &attr.sample_freq, 617 &attr.type, &attr.config, &attr.sample_freq,
570 &sample_period, &attr.sample_type, 618 &sample_period, &attr.sample_type,
571 &attr.read_format, &disabled, &inherit, 619 &attr.read_format, &disabled, &inherit,
572 &pinned, &exclusive, &exclude_user, 620 &pinned, &exclusive, &exclude_user,
573 &exclude_kernel, &exclude_hv, &exclude_idle, 621 &exclude_kernel, &exclude_hv, &exclude_idle,
574 &mmap, &comm, &freq, &inherit_stat, 622 &mmap, &context_switch, &comm, &freq, &inherit_stat,
575 &enable_on_exec, &task, &watermark, 623 &enable_on_exec, &task, &watermark,
576 &precise_ip, &mmap_data, &sample_id_all, 624 &precise_ip, &mmap_data, &sample_id_all,
577 &attr.wakeup_events, &attr.bp_type, 625 &attr.wakeup_events, &attr.bp_type,
@@ -595,6 +643,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
595 attr.exclude_hv = exclude_hv; 643 attr.exclude_hv = exclude_hv;
596 attr.exclude_idle = exclude_idle; 644 attr.exclude_idle = exclude_idle;
597 attr.mmap = mmap; 645 attr.mmap = mmap;
646 attr.context_switch = context_switch;
598 attr.comm = comm; 647 attr.comm = comm;
599 attr.freq = freq; 648 attr.freq = freq;
600 attr.inherit_stat = inherit_stat; 649 attr.inherit_stat = inherit_stat;
@@ -1019,6 +1068,8 @@ static struct {
1019 PERF_CONST(RECORD_LOST_SAMPLES), 1068 PERF_CONST(RECORD_LOST_SAMPLES),
1020 PERF_CONST(RECORD_SWITCH), 1069 PERF_CONST(RECORD_SWITCH),
1021 PERF_CONST(RECORD_SWITCH_CPU_WIDE), 1070 PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1071
1072 PERF_CONST(RECORD_MISC_SWITCH_OUT),
1022 { .name = NULL, }, 1073 { .name = NULL, },
1023}; 1074};
1024 1075
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 1bd593bbf7a5..544509c159ce 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -221,6 +221,7 @@ static void define_event_symbols(struct event_format *event,
221 break; 221 break;
222 case PRINT_BSTRING: 222 case PRINT_BSTRING:
223 case PRINT_DYNAMIC_ARRAY: 223 case PRINT_DYNAMIC_ARRAY:
224 case PRINT_DYNAMIC_ARRAY_LEN:
224 case PRINT_STRING: 225 case PRINT_STRING:
225 case PRINT_BITMASK: 226 case PRINT_BITMASK:
226 break; 227 break;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index ace2484985cb..a8e825fca42a 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -251,6 +251,7 @@ static void define_event_symbols(struct event_format *event,
251 /* gcc warns for these? */ 251 /* gcc warns for these? */
252 case PRINT_BSTRING: 252 case PRINT_BSTRING:
253 case PRINT_DYNAMIC_ARRAY: 253 case PRINT_DYNAMIC_ARRAY:
254 case PRINT_DYNAMIC_ARRAY_LEN:
254 case PRINT_FUNC: 255 case PRINT_FUNC:
255 case PRINT_BITMASK: 256 case PRINT_BITMASK:
256 /* we should warn... */ 257 /* we should warn... */
@@ -318,7 +319,7 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
318 319
319 if (thread__resolve_callchain(al->thread, evsel, 320 if (thread__resolve_callchain(al->thread, evsel,
320 sample, NULL, NULL, 321 sample, NULL, NULL,
321 PERF_MAX_STACK_DEPTH) != 0) { 322 scripting_max_stack) != 0) {
322 pr_err("Failed to resolve callchain. Skipping\n"); 323 pr_err("Failed to resolve callchain. Skipping\n");
323 goto exit; 324 goto exit;
324 } 325 }
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index fc3f7c922f99..428149bc64d2 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -138,6 +138,8 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
138 perf_session__set_id_hdr_size(session); 138 perf_session__set_id_hdr_size(session);
139 perf_session__set_comm_exec(session); 139 perf_session__set_comm_exec(session);
140 } 140 }
141 } else {
142 session->machines.host.env = &perf_env;
141 } 143 }
142 144
143 if (!file || perf_data_file__is_write(file)) { 145 if (!file || perf_data_file__is_write(file)) {
@@ -170,30 +172,13 @@ static void perf_session__delete_threads(struct perf_session *session)
170 machine__delete_threads(&session->machines.host); 172 machine__delete_threads(&session->machines.host);
171} 173}
172 174
173static void perf_session_env__exit(struct perf_env *env)
174{
175 zfree(&env->hostname);
176 zfree(&env->os_release);
177 zfree(&env->version);
178 zfree(&env->arch);
179 zfree(&env->cpu_desc);
180 zfree(&env->cpuid);
181
182 zfree(&env->cmdline);
183 zfree(&env->cmdline_argv);
184 zfree(&env->sibling_cores);
185 zfree(&env->sibling_threads);
186 zfree(&env->numa_nodes);
187 zfree(&env->pmu_mappings);
188}
189
190void perf_session__delete(struct perf_session *session) 175void perf_session__delete(struct perf_session *session)
191{ 176{
192 auxtrace__free(session); 177 auxtrace__free(session);
193 auxtrace_index__free(&session->auxtrace_index); 178 auxtrace_index__free(&session->auxtrace_index);
194 perf_session__destroy_kernel_maps(session); 179 perf_session__destroy_kernel_maps(session);
195 perf_session__delete_threads(session); 180 perf_session__delete_threads(session);
196 perf_session_env__exit(&session->header.env); 181 perf_env__exit(&session->header.env);
197 machines__exit(&session->machines); 182 machines__exit(&session->machines);
198 if (session->file) 183 if (session->file)
199 perf_data_file__close(session->file); 184 perf_data_file__close(session->file);
@@ -1079,11 +1064,11 @@ static int machines__deliver_event(struct machines *machines,
1079 1064
1080 switch (event->header.type) { 1065 switch (event->header.type) {
1081 case PERF_RECORD_SAMPLE: 1066 case PERF_RECORD_SAMPLE:
1082 dump_sample(evsel, event, sample);
1083 if (evsel == NULL) { 1067 if (evsel == NULL) {
1084 ++evlist->stats.nr_unknown_id; 1068 ++evlist->stats.nr_unknown_id;
1085 return 0; 1069 return 0;
1086 } 1070 }
1071 dump_sample(evsel, event, sample);
1087 if (machine == NULL) { 1072 if (machine == NULL) {
1088 ++evlist->stats.nr_unprocessable_samples; 1073 ++evlist->stats.nr_unprocessable_samples;
1089 return 0; 1074 return 0;
@@ -1116,6 +1101,9 @@ static int machines__deliver_event(struct machines *machines,
1116 case PERF_RECORD_UNTHROTTLE: 1101 case PERF_RECORD_UNTHROTTLE:
1117 return tool->unthrottle(tool, event, sample, machine); 1102 return tool->unthrottle(tool, event, sample, machine);
1118 case PERF_RECORD_AUX: 1103 case PERF_RECORD_AUX:
1104 if (tool->aux == perf_event__process_aux &&
1105 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
1106 evlist->stats.total_aux_lost += 1;
1119 return tool->aux(tool, event, sample, machine); 1107 return tool->aux(tool, event, sample, machine);
1120 case PERF_RECORD_ITRACE_START: 1108 case PERF_RECORD_ITRACE_START:
1121 return tool->itrace_start(tool, event, sample, machine); 1109 return tool->itrace_start(tool, event, sample, machine);
@@ -1323,7 +1311,7 @@ struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1323 return machine__findnew_thread(&session->machines.host, -1, pid); 1311 return machine__findnew_thread(&session->machines.host, -1, pid);
1324} 1312}
1325 1313
1326static struct thread *perf_session__register_idle_thread(struct perf_session *session) 1314struct thread *perf_session__register_idle_thread(struct perf_session *session)
1327{ 1315{
1328 struct thread *thread; 1316 struct thread *thread;
1329 1317
@@ -1361,6 +1349,13 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
1361 } 1349 }
1362 } 1350 }
1363 1351
1352 if (session->tool->aux == perf_event__process_aux &&
1353 stats->total_aux_lost != 0) {
1354 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1355 stats->total_aux_lost,
1356 stats->nr_events[PERF_RECORD_AUX]);
1357 }
1358
1364 if (stats->nr_unknown_events != 0) { 1359 if (stats->nr_unknown_events != 0) {
1365 ui__warning("Found %u unknown events!\n\n" 1360 ui__warning("Found %u unknown events!\n\n"
1366 "Is this an older tool processing a perf.data " 1361 "Is this an older tool processing a perf.data "
@@ -1805,7 +1800,7 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
1805 1800
1806 if (thread__resolve_callchain(al->thread, evsel, 1801 if (thread__resolve_callchain(al->thread, evsel,
1807 sample, NULL, NULL, 1802 sample, NULL, NULL,
1808 PERF_MAX_STACK_DEPTH) != 0) { 1803 stack_depth) != 0) {
1809 if (verbose) 1804 if (verbose)
1810 error("Failed to resolve callchain. Skipping\n"); 1805 error("Failed to resolve callchain. Skipping\n");
1811 return; 1806 return;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index b44afc75d1cc..3e900c0efc73 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -89,6 +89,8 @@ struct machine *perf_session__findnew_machine(struct perf_session *session, pid_
89} 89}
90 90
91struct thread *perf_session__findnew(struct perf_session *session, pid_t pid); 91struct thread *perf_session__findnew(struct perf_session *session, pid_t pid);
92struct thread *perf_session__register_idle_thread(struct perf_session *session);
93
92size_t perf_session__fprintf(struct perf_session *session, FILE *fp); 94size_t perf_session__fprintf(struct perf_session *session, FILE *fp);
93 95
94size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp); 96size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 7e3871606df3..2d8ccd4d9e1b 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -21,6 +21,7 @@ int sort__need_collapse = 0;
21int sort__has_parent = 0; 21int sort__has_parent = 0;
22int sort__has_sym = 0; 22int sort__has_sym = 0;
23int sort__has_dso = 0; 23int sort__has_dso = 0;
24int sort__has_socket = 0;
24enum sort_mode sort__mode = SORT_MODE__NORMAL; 25enum sort_mode sort__mode = SORT_MODE__NORMAL;
25 26
26 27
@@ -328,8 +329,8 @@ static char *get_srcfile(struct hist_entry *e)
328 char *sf, *p; 329 char *sf, *p;
329 struct map *map = e->ms.map; 330 struct map *map = e->ms.map;
330 331
331 sf = get_srcline(map->dso, map__rip_2objdump(map, e->ip), 332 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
332 e->ms.sym, true); 333 e->ms.sym, false, true);
333 if (!strcmp(sf, SRCLINE_UNKNOWN)) 334 if (!strcmp(sf, SRCLINE_UNKNOWN))
334 return no_srcfile; 335 return no_srcfile;
335 p = strchr(sf, ':'); 336 p = strchr(sf, ':');
@@ -421,6 +422,27 @@ struct sort_entry sort_cpu = {
421 .se_width_idx = HISTC_CPU, 422 .se_width_idx = HISTC_CPU,
422}; 423};
423 424
425/* --sort socket */
426
427static int64_t
428sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
429{
430 return right->socket - left->socket;
431}
432
433static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
434 size_t size, unsigned int width)
435{
436 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
437}
438
439struct sort_entry sort_socket = {
440 .se_header = "Socket",
441 .se_cmp = sort__socket_cmp,
442 .se_snprintf = hist_entry__socket_snprintf,
443 .se_width_idx = HISTC_SOCKET,
444};
445
424/* sort keys for branch stacks */ 446/* sort keys for branch stacks */
425 447
426static int64_t 448static int64_t
@@ -633,6 +655,35 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
633} 655}
634 656
635static int64_t 657static int64_t
658sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
659{
660 uint64_t l = 0, r = 0;
661
662 if (left->mem_info)
663 l = left->mem_info->iaddr.addr;
664 if (right->mem_info)
665 r = right->mem_info->iaddr.addr;
666
667 return (int64_t)(r - l);
668}
669
670static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
671 size_t size, unsigned int width)
672{
673 uint64_t addr = 0;
674 struct map *map = NULL;
675 struct symbol *sym = NULL;
676
677 if (he->mem_info) {
678 addr = he->mem_info->iaddr.addr;
679 map = he->mem_info->iaddr.map;
680 sym = he->mem_info->iaddr.sym;
681 }
682 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
683 width);
684}
685
686static int64_t
636sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 687sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
637{ 688{
638 struct map *map_l = NULL; 689 struct map *map_l = NULL;
@@ -1055,6 +1106,13 @@ struct sort_entry sort_mem_daddr_sym = {
1055 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1106 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1056}; 1107};
1057 1108
1109struct sort_entry sort_mem_iaddr_sym = {
1110 .se_header = "Code Symbol",
1111 .se_cmp = sort__iaddr_cmp,
1112 .se_snprintf = hist_entry__iaddr_snprintf,
1113 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1114};
1115
1058struct sort_entry sort_mem_daddr_dso = { 1116struct sort_entry sort_mem_daddr_dso = {
1059 .se_header = "Data Object", 1117 .se_header = "Data Object",
1060 .se_cmp = sort__dso_daddr_cmp, 1118 .se_cmp = sort__dso_daddr_cmp,
@@ -1248,6 +1306,7 @@ static struct sort_dimension common_sort_dimensions[] = {
1248 DIM(SORT_SYM, "symbol", sort_sym), 1306 DIM(SORT_SYM, "symbol", sort_sym),
1249 DIM(SORT_PARENT, "parent", sort_parent), 1307 DIM(SORT_PARENT, "parent", sort_parent),
1250 DIM(SORT_CPU, "cpu", sort_cpu), 1308 DIM(SORT_CPU, "cpu", sort_cpu),
1309 DIM(SORT_SOCKET, "socket", sort_socket),
1251 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1310 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1252 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1311 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1253 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1312 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
@@ -1276,6 +1335,7 @@ static struct sort_dimension bstack_sort_dimensions[] = {
1276 1335
1277static struct sort_dimension memory_sort_dimensions[] = { 1336static struct sort_dimension memory_sort_dimensions[] = {
1278 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1337 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1338 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1279 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1339 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1280 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1340 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1281 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1341 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
@@ -1517,6 +1577,12 @@ static int __hpp_dimension__add_output(struct hpp_dimension *hd)
1517 return 0; 1577 return 0;
1518} 1578}
1519 1579
1580int hpp_dimension__add_output(unsigned col)
1581{
1582 BUG_ON(col >= PERF_HPP__MAX_INDEX);
1583 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
1584}
1585
1520int sort_dimension__add(const char *tok) 1586int sort_dimension__add(const char *tok)
1521{ 1587{
1522 unsigned int i; 1588 unsigned int i;
@@ -1550,6 +1616,8 @@ int sort_dimension__add(const char *tok)
1550 1616
1551 } else if (sd->entry == &sort_dso) { 1617 } else if (sd->entry == &sort_dso) {
1552 sort__has_dso = 1; 1618 sort__has_dso = 1;
1619 } else if (sd->entry == &sort_socket) {
1620 sort__has_socket = 1;
1553 } 1621 }
1554 1622
1555 return __sort_dimension__add(sd); 1623 return __sort_dimension__add(sd);
@@ -1855,8 +1923,6 @@ static int __setup_output_field(void)
1855 if (field_order == NULL) 1923 if (field_order == NULL)
1856 return 0; 1924 return 0;
1857 1925
1858 reset_dimensions();
1859
1860 strp = str = strdup(field_order); 1926 strp = str = strdup(field_order);
1861 if (str == NULL) { 1927 if (str == NULL) {
1862 error("Not enough memory to setup output fields"); 1928 error("Not enough memory to setup output fields");
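
Each sort key added above supplies a comparison callback and a fixed-width snprintf callback; the new "socket" key compares the hist_entry socket ids and prints them zero-padded. The standalone sketch below mirrors that cmp/snprintf contract with a toy entry type and qsort(); struct entry, socket_cmp() and the widths are illustrative only, not perf's hist_entry machinery.

/* Hedged sketch of the cmp/snprintf pair a sort key provides; everything
 * here is a made-up example, not the perf code. */
#include <stdio.h>
#include <stdlib.h>

struct entry { int socket; const char *comm; };

static int socket_cmp(const void *pa, const void *pb)
{
	const struct entry *a = pa, *b = pb;

	return b->socket - a->socket;		/* same ordering rule as sort__socket_cmp() */
}

int main(void)
{
	struct entry e[] = { {1, "firefox"}, {0, "perf"}, {1, "sshd"} };
	int i, width = 6;

	qsort(e, 3, sizeof(e[0]), socket_cmp);
	for (i = 0; i < 3; i++)			/* "%*.*d" mirrors hist_entry__socket_snprintf() */
		printf("%*.*d  %s\n", width, width - 3, e[i].socket, e[i].comm);
	return 0;
}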
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 3c2a399f8f5b..31228851e397 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -34,6 +34,7 @@ extern int have_ignore_callees;
34extern int sort__need_collapse; 34extern int sort__need_collapse;
35extern int sort__has_parent; 35extern int sort__has_parent;
36extern int sort__has_sym; 36extern int sort__has_sym;
37extern int sort__has_socket;
37extern enum sort_mode sort__mode; 38extern enum sort_mode sort__mode;
38extern struct sort_entry sort_comm; 39extern struct sort_entry sort_comm;
39extern struct sort_entry sort_dso; 40extern struct sort_entry sort_dso;
@@ -90,6 +91,7 @@ struct hist_entry {
90 struct comm *comm; 91 struct comm *comm;
91 u64 ip; 92 u64 ip;
92 u64 transaction; 93 u64 transaction;
94 s32 socket;
93 s32 cpu; 95 s32 cpu;
94 u8 cpumode; 96 u8 cpumode;
95 97
@@ -172,6 +174,7 @@ enum sort_type {
172 SORT_SYM, 174 SORT_SYM,
173 SORT_PARENT, 175 SORT_PARENT,
174 SORT_CPU, 176 SORT_CPU,
177 SORT_SOCKET,
175 SORT_SRCLINE, 178 SORT_SRCLINE,
176 SORT_SRCFILE, 179 SORT_SRCFILE,
177 SORT_LOCAL_WEIGHT, 180 SORT_LOCAL_WEIGHT,
@@ -198,6 +201,7 @@ enum sort_type {
198 SORT_MEM_LVL, 201 SORT_MEM_LVL,
199 SORT_MEM_SNOOP, 202 SORT_MEM_SNOOP,
200 SORT_MEM_DCACHELINE, 203 SORT_MEM_DCACHELINE,
204 SORT_MEM_IADDR_SYMBOL,
201}; 205};
202 206
203/* 207/*
@@ -230,4 +234,6 @@ void perf_hpp__set_elide(int idx, bool elide);
230int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); 234int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
231 235
232bool is_strict_order(const char *order); 236bool is_strict_order(const char *order);
237
238int hpp_dimension__add_output(unsigned col);
233#endif /* __PERF_SORT_H */ 239#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index fc08248f08ca..b4db3f48e3b0 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -149,8 +149,11 @@ static void addr2line_cleanup(struct a2l_data *a2l)
149 free(a2l); 149 free(a2l);
150} 150}
151 151
152#define MAX_INLINE_NEST 1024
153
152static int addr2line(const char *dso_name, u64 addr, 154static int addr2line(const char *dso_name, u64 addr,
153 char **file, unsigned int *line, struct dso *dso) 155 char **file, unsigned int *line, struct dso *dso,
156 bool unwind_inlines)
154{ 157{
155 int ret = 0; 158 int ret = 0;
156 struct a2l_data *a2l = dso->a2l; 159 struct a2l_data *a2l = dso->a2l;
@@ -170,6 +173,15 @@ static int addr2line(const char *dso_name, u64 addr,
170 173
171 bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); 174 bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
172 175
176 if (a2l->found && unwind_inlines) {
177 int cnt = 0;
178
179 while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
180 &a2l->funcname, &a2l->line) &&
181 cnt++ < MAX_INLINE_NEST)
182 ;
183 }
184
173 if (a2l->found && a2l->filename) { 185 if (a2l->found && a2l->filename) {
174 *file = strdup(a2l->filename); 186 *file = strdup(a2l->filename);
175 *line = a2l->line; 187 *line = a2l->line;
@@ -197,7 +209,8 @@ void dso__free_a2l(struct dso *dso)
197 209
198static int addr2line(const char *dso_name, u64 addr, 210static int addr2line(const char *dso_name, u64 addr,
199 char **file, unsigned int *line_nr, 211 char **file, unsigned int *line_nr,
200 struct dso *dso __maybe_unused) 212 struct dso *dso __maybe_unused,
213 bool unwind_inlines __maybe_unused)
201{ 214{
202 FILE *fp; 215 FILE *fp;
203 char cmd[PATH_MAX]; 216 char cmd[PATH_MAX];
@@ -254,8 +267,8 @@ void dso__free_a2l(struct dso *dso __maybe_unused)
254 */ 267 */
255#define A2L_FAIL_LIMIT 123 268#define A2L_FAIL_LIMIT 123
256 269
257char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, 270char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
258 bool show_sym) 271 bool show_sym, bool unwind_inlines)
259{ 272{
260 char *file = NULL; 273 char *file = NULL;
261 unsigned line = 0; 274 unsigned line = 0;
@@ -276,7 +289,7 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
276 if (!strncmp(dso_name, "/tmp/perf-", 10)) 289 if (!strncmp(dso_name, "/tmp/perf-", 10))
277 goto out; 290 goto out;
278 291
279 if (!addr2line(dso_name, addr, &file, &line, dso)) 292 if (!addr2line(dso_name, addr, &file, &line, dso, unwind_inlines))
280 goto out; 293 goto out;
281 294
282 if (asprintf(&srcline, "%s:%u", 295 if (asprintf(&srcline, "%s:%u",
@@ -310,3 +323,9 @@ void free_srcline(char *srcline)
310 if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0) 323 if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0)
311 free(srcline); 324 free(srcline);
312} 325}
326
327char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
328 bool show_sym)
329{
330 return __get_srcline(dso, addr, sym, show_sym, false);
331}
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2d065d065b67..2d9d8306dbd3 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -67,7 +67,7 @@ double rel_stddev_stats(double stddev, double avg)
67bool __perf_evsel_stat__is(struct perf_evsel *evsel, 67bool __perf_evsel_stat__is(struct perf_evsel *evsel,
68 enum perf_stat_evsel_id id) 68 enum perf_stat_evsel_id id)
69{ 69{
70 struct perf_stat *ps = evsel->priv; 70 struct perf_stat_evsel *ps = evsel->priv;
71 71
72 return ps->id == id; 72 return ps->id == id;
73} 73}
@@ -84,7 +84,7 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
84 84
85void perf_stat_evsel_id_init(struct perf_evsel *evsel) 85void perf_stat_evsel_id_init(struct perf_evsel *evsel)
86{ 86{
87 struct perf_stat *ps = evsel->priv; 87 struct perf_stat_evsel *ps = evsel->priv;
88 int i; 88 int i;
89 89
90 /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */ 90 /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
@@ -100,7 +100,7 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
100void perf_evsel__reset_stat_priv(struct perf_evsel *evsel) 100void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
101{ 101{
102 int i; 102 int i;
103 struct perf_stat *ps = evsel->priv; 103 struct perf_stat_evsel *ps = evsel->priv;
104 104
105 for (i = 0; i < 3; i++) 105 for (i = 0; i < 3; i++)
106 init_stats(&ps->res_stats[i]); 106 init_stats(&ps->res_stats[i]);
@@ -110,7 +110,7 @@ void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
110 110
111int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) 111int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
112{ 112{
113 evsel->priv = zalloc(sizeof(struct perf_stat)); 113 evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
114 if (evsel->priv == NULL) 114 if (evsel->priv == NULL)
115 return -ENOMEM; 115 return -ENOMEM;
116 perf_evsel__reset_stat_priv(evsel); 116 perf_evsel__reset_stat_priv(evsel);
@@ -230,7 +230,7 @@ static int check_per_pkg(struct perf_evsel *counter,
230 if (!(vals->run && vals->ena)) 230 if (!(vals->run && vals->ena))
231 return 0; 231 return 0;
232 232
233 s = cpu_map__get_socket(cpus, cpu); 233 s = cpu_map__get_socket(cpus, cpu, NULL);
234 if (s < 0) 234 if (s < 0)
235 return -1; 235 return -1;
236 236
@@ -272,6 +272,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
272 aggr->ena += count->ena; 272 aggr->ena += count->ena;
273 aggr->run += count->run; 273 aggr->run += count->run;
274 } 274 }
275 case AGGR_UNSET:
275 default: 276 default:
276 break; 277 break;
277 } 278 }
@@ -304,7 +305,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
304 struct perf_evsel *counter) 305 struct perf_evsel *counter)
305{ 306{
306 struct perf_counts_values *aggr = &counter->counts->aggr; 307 struct perf_counts_values *aggr = &counter->counts->aggr;
307 struct perf_stat *ps = counter->priv; 308 struct perf_stat_evsel *ps = counter->priv;
308 u64 *count = counter->counts->aggr.values; 309 u64 *count = counter->counts->aggr.values;
309 int i, ret; 310 int i, ret;
310 311
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 62448c8175d3..da1d11c4f8c1 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -20,7 +20,7 @@ enum perf_stat_evsel_id {
20 PERF_STAT_EVSEL_ID__MAX, 20 PERF_STAT_EVSEL_ID__MAX,
21}; 21};
22 22
23struct perf_stat { 23struct perf_stat_evsel {
24 struct stats res_stats[3]; 24 struct stats res_stats[3];
25 enum perf_stat_evsel_id id; 25 enum perf_stat_evsel_id id;
26}; 26};
@@ -31,6 +31,7 @@ enum aggr_mode {
31 AGGR_SOCKET, 31 AGGR_SOCKET,
32 AGGR_CORE, 32 AGGR_CORE,
33 AGGR_THREAD, 33 AGGR_THREAD,
34 AGGR_UNSET,
34}; 35};
35 36
36struct perf_stat_config { 37struct perf_stat_config {
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 4abe23550c73..25671fa16618 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -82,23 +82,22 @@ void strbuf_add(struct strbuf *sb, const void *data, size_t len)
82 strbuf_setlen(sb, sb->len + len); 82 strbuf_setlen(sb, sb->len + len);
83} 83}
84 84
85void strbuf_addf(struct strbuf *sb, const char *fmt, ...) 85void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
86{ 86{
87 int len; 87 int len;
88 va_list ap; 88 va_list ap_saved;
89 89
90 if (!strbuf_avail(sb)) 90 if (!strbuf_avail(sb))
91 strbuf_grow(sb, 64); 91 strbuf_grow(sb, 64);
92 va_start(ap, fmt); 92
93 va_copy(ap_saved, ap);
93 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); 94 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
94 va_end(ap);
95 if (len < 0) 95 if (len < 0)
96 die("your vsnprintf is broken"); 96 die("your vsnprintf is broken");
97 if (len > strbuf_avail(sb)) { 97 if (len > strbuf_avail(sb)) {
98 strbuf_grow(sb, len); 98 strbuf_grow(sb, len);
99 va_start(ap, fmt); 99 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
100 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); 100 va_end(ap_saved);
101 va_end(ap);
102 if (len > strbuf_avail(sb)) { 101 if (len > strbuf_avail(sb)) {
103 die("this should not happen, your vsnprintf is broken"); 102 die("this should not happen, your vsnprintf is broken");
104 } 103 }
@@ -106,6 +105,15 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
106 strbuf_setlen(sb, sb->len + len); 105 strbuf_setlen(sb, sb->len + len);
107} 106}
108 107
108void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
109{
110 va_list ap;
111
112 va_start(ap, fmt);
113 strbuf_addv(sb, fmt, ap);
114 va_end(ap);
115}
116
109ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint) 117ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
110{ 118{
111 size_t oldlen = sb->len; 119 size_t oldlen = sb->len;
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index 436ac319f6c7..529f2f035249 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -39,6 +39,7 @@
39 */ 39 */
40 40
41#include <assert.h> 41#include <assert.h>
42#include <stdarg.h>
42 43
43extern char strbuf_slopbuf[]; 44extern char strbuf_slopbuf[];
44struct strbuf { 45struct strbuf {
@@ -85,6 +86,7 @@ static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
85 86
86__attribute__((format(printf,2,3))) 87__attribute__((format(printf,2,3)))
87extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); 88extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
89extern void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap);
88 90
89/* XXX: if read fails, any partial read is undone */ 91/* XXX: if read fails, any partial read is undone */
90extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint); 92extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
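
The strbuf change above splits strbuf_addf() into a va_list-based strbuf_addv() and takes a va_copy of the arguments so the vsnprintf() call can be retried after growing the buffer; a va_list that has already been consumed must not be passed to vsnprintf() again. A minimal standalone sketch of the same pattern follows (buf_addv(), buf_addf() and the growth policy are illustrative, not the perf implementation).

/* Hedged sketch: the va_copy-then-retry pattern used by strbuf_addv(). */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static void buf_addv(char **buf, size_t *len, size_t *cap, const char *fmt, va_list ap)
{
	va_list ap_saved;
	int n;

	va_copy(ap_saved, ap);			/* save a copy before the first pass consumes ap */
	n = vsnprintf(*buf + *len, *cap - *len, fmt, ap);
	if (n < 0) {
		va_end(ap_saved);
		return;				/* broken vsnprintf; error handling elided */
	}
	if ((size_t)n >= *cap - *len) {		/* output truncated: grow and retry with the copy */
		*cap = *len + n + 1;
		*buf = realloc(*buf, *cap);	/* allocation failure handling elided */
		n = vsnprintf(*buf + *len, *cap - *len, fmt, ap_saved);
	}
	va_end(ap_saved);
	*len += (size_t)n;
}

static void buf_addf(char **buf, size_t *len, size_t *cap, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	buf_addv(buf, len, cap, fmt, ap);	/* same split as strbuf_addf()/strbuf_addv() */
	va_end(ap);
}

int main(void)
{
	size_t len = 0, cap = 8;
	char *buf = calloc(1, cap);

	buf_addf(&buf, &len, &cap, "pid=%d comm=%s", 1234, "perf");
	printf("%s\n", buf);
	free(buf);
	return 0;
}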
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index fd8477cacf88..48906333a858 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -337,7 +337,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
337 symbol_filter_t filter __maybe_unused, 337 symbol_filter_t filter __maybe_unused,
338 int kmodule __maybe_unused) 338 int kmodule __maybe_unused)
339{ 339{
340 unsigned char *build_id[BUILD_ID_SIZE]; 340 unsigned char build_id[BUILD_ID_SIZE];
341 int ret; 341 int ret;
342 342
343 ret = fd__is_64_bit(ss->fd); 343 ret = fd__is_64_bit(ss->fd);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1f97ffb158a6..b4cc7662677e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -624,7 +624,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
624 * symbols, setting length to 0, and rely on 624 * symbols, setting length to 0, and rely on
625 * symbols__fixup_end() to fix it up. 625 * symbols__fixup_end() to fix it up.
626 */ 626 */
627 sym = symbol__new(start, 0, kallsyms2elf_type(type), name); 627 sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
628 if (sym == NULL) 628 if (sym == NULL)
629 return -ENOMEM; 629 return -ENOMEM;
630 /* 630 /*
@@ -680,7 +680,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
680 pos->start -= curr_map->start - curr_map->pgoff; 680 pos->start -= curr_map->start - curr_map->pgoff;
681 if (pos->end) 681 if (pos->end)
682 pos->end -= curr_map->start - curr_map->pgoff; 682 pos->end -= curr_map->start - curr_map->pgoff;
683 if (curr_map != map) { 683 if (curr_map->dso != map->dso) {
684 rb_erase_init(&pos->rb_node, root); 684 rb_erase_init(&pos->rb_node, root);
685 symbols__insert( 685 symbols__insert(
686 &curr_map->dso->symbols[curr_map->type], 686 &curr_map->dso->symbols[curr_map->type],
@@ -1406,6 +1406,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1406 struct symsrc ss_[2]; 1406 struct symsrc ss_[2];
1407 struct symsrc *syms_ss = NULL, *runtime_ss = NULL; 1407 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1408 bool kmod; 1408 bool kmod;
1409 unsigned char build_id[BUILD_ID_SIZE];
1409 1410
1410 pthread_mutex_lock(&dso->lock); 1411 pthread_mutex_lock(&dso->lock);
1411 1412
@@ -1461,6 +1462,14 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1461 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE || 1462 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1462 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 1463 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1463 1464
1465
1466 /*
1467 * Read the build id if possible. This is required for
1468 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1469 */
1470 if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
1471 dso__set_build_id(dso, build_id);
1472
1464 /* 1473 /*
1465 * Iterate over candidate debug images. 1474 * Iterate over candidate debug images.
1466 * Keep track of "interesting" ones (those which have a symtab, dynsym, 1475 * Keep track of "interesting" ones (those which have a symtab, dynsym,
@@ -1607,6 +1616,15 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1607 int i, err = 0; 1616 int i, err = 0;
1608 char *filename = NULL; 1617 char *filename = NULL;
1609 1618
1619 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1620 vmlinux_path__nr_entries + 1);
1621
1622 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1623 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
1624 if (err > 0)
1625 goto out;
1626 }
1627
1610 if (!symbol_conf.ignore_vmlinux_buildid) 1628 if (!symbol_conf.ignore_vmlinux_buildid)
1611 filename = dso__build_id_filename(dso, NULL, 0); 1629 filename = dso__build_id_filename(dso, NULL, 0);
1612 if (filename != NULL) { 1630 if (filename != NULL) {
@@ -1615,15 +1633,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1615 goto out; 1633 goto out;
1616 free(filename); 1634 free(filename);
1617 } 1635 }
1618
1619 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1620 vmlinux_path__nr_entries + 1);
1621
1622 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1623 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
1624 if (err > 0)
1625 break;
1626 }
1627out: 1636out:
1628 return err; 1637 return err;
1629} 1638}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 440ba8ae888f..40073c60b83d 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -191,6 +191,7 @@ struct addr_location {
191 u8 filtered; 191 u8 filtered;
192 u8 cpumode; 192 u8 cpumode;
193 s32 cpu; 193 s32 cpu;
194 s32 socket;
194}; 195};
195 196
196struct symsrc { 197struct symsrc {
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 22245986e59e..d995743cb673 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -38,7 +38,7 @@
38 38
39#include "../perf.h" 39#include "../perf.h"
40#include "trace-event.h" 40#include "trace-event.h"
41#include <api/fs/debugfs.h> 41#include <api/fs/tracing_path.h>
42#include "evsel.h" 42#include "evsel.h"
43#include "debug.h" 43#include "debug.h"
44 44
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index b90e646c7a91..802bb868d446 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -7,7 +7,9 @@
7#include <sys/stat.h> 7#include <sys/stat.h>
8#include <fcntl.h> 8#include <fcntl.h>
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/err.h>
10#include <traceevent/event-parse.h> 11#include <traceevent/event-parse.h>
12#include <api/fs/tracing_path.h>
11#include "trace-event.h" 13#include "trace-event.h"
12#include "machine.h" 14#include "machine.h"
13#include "util.h" 15#include "util.h"
@@ -65,6 +67,9 @@ void trace_event__cleanup(struct trace_event *t)
65 pevent_free(t->pevent); 67 pevent_free(t->pevent);
66} 68}
67 69
70/*
71 * Returns pointer with encoded error via <linux/err.h> interface.
72 */
68static struct event_format* 73static struct event_format*
69tp_format(const char *sys, const char *name) 74tp_format(const char *sys, const char *name)
70{ 75{
@@ -73,12 +78,14 @@ tp_format(const char *sys, const char *name)
73 char path[PATH_MAX]; 78 char path[PATH_MAX];
74 size_t size; 79 size_t size;
75 char *data; 80 char *data;
81 int err;
76 82
77 scnprintf(path, PATH_MAX, "%s/%s/%s/format", 83 scnprintf(path, PATH_MAX, "%s/%s/%s/format",
78 tracing_events_path, sys, name); 84 tracing_events_path, sys, name);
79 85
80 if (filename__read_str(path, &data, &size)) 86 err = filename__read_str(path, &data, &size);
81 return NULL; 87 if (err)
88 return ERR_PTR(err);
82 89
83 pevent_parse_format(pevent, &event, data, size, sys); 90 pevent_parse_format(pevent, &event, data, size, sys);
84 91
@@ -86,11 +93,14 @@ tp_format(const char *sys, const char *name)
86 return event; 93 return event;
87} 94}
88 95
96/*
97 * Returns pointer with encoded error via <linux/err.h> interface.
98 */
89struct event_format* 99struct event_format*
90trace_event__tp_format(const char *sys, const char *name) 100trace_event__tp_format(const char *sys, const char *name)
91{ 101{
92 if (!tevent_initialized && trace_event__init2()) 102 if (!tevent_initialized && trace_event__init2())
93 return NULL; 103 return ERR_PTR(-ENOMEM);
94 104
95 return tp_format(sys, name); 105 return tp_format(sys, name);
96} 106}
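
With the change above, tp_format() and trace_event__tp_format() report failures as error-encoded pointers rather than NULL, so callers are expected to test IS_ERR() and recover the errno with PTR_ERR(). The sketch below only illustrates that convention: the macro definitions are simplified stand-ins for tools/include/linux/err.h and lookup_format() is a made-up example.

/* Hedged sketch of the <linux/err.h> error-pointer convention. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int the_format;				/* stands in for a parsed event format */

static void *lookup_format(int simulate_enoent)
{
	if (simulate_enoent)
		return ERR_PTR(-ENOENT);	/* the error travels inside the pointer */
	return &the_format;
}

int main(void)
{
	void *fmt = lookup_format(1);

	if (IS_ERR(fmt)) {			/* callers now check IS_ERR(), not NULL */
		fprintf(stderr, "format lookup failed: %ld\n", PTR_ERR(fmt));
		return 1;
	}
	return 0;
}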
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index da6cc4cc2a4f..b85ee55cca0c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -78,6 +78,8 @@ struct scripting_ops {
78 int (*generate_script) (struct pevent *pevent, const char *outfile); 78 int (*generate_script) (struct pevent *pevent, const char *outfile);
79}; 79};
80 80
81extern unsigned int scripting_max_stack;
82
81int script_spec_register(const char *spec, struct scripting_ops *ops); 83int script_spec_register(const char *spec, struct scripting_ops *ops);
82 84
83void setup_perl_scripting(void); 85void setup_perl_scripting(void);
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 4c00507ee3fd..c83832b555e5 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -330,6 +330,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
330 struct map *map; 330 struct map *map;
331 unw_dyn_info_t di; 331 unw_dyn_info_t di;
332 u64 table_data, segbase, fde_count; 332 u64 table_data, segbase, fde_count;
333 int ret = -EINVAL;
333 334
334 map = find_map(ip, ui); 335 map = find_map(ip, ui);
335 if (!map || !map->dso) 336 if (!map || !map->dso)
@@ -348,29 +349,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
348 di.u.rti.table_data = map->start + table_data; 349 di.u.rti.table_data = map->start + table_data;
349 di.u.rti.table_len = fde_count * sizeof(struct table_entry) 350 di.u.rti.table_len = fde_count * sizeof(struct table_entry)
350 / sizeof(unw_word_t); 351 / sizeof(unw_word_t);
351 return dwarf_search_unwind_table(as, ip, &di, pi, 352 ret = dwarf_search_unwind_table(as, ip, &di, pi,
352 need_unwind_info, arg); 353 need_unwind_info, arg);
353 } 354 }
354 355
355#ifndef NO_LIBUNWIND_DEBUG_FRAME 356#ifndef NO_LIBUNWIND_DEBUG_FRAME
356 /* Check the .debug_frame section for unwinding info */ 357 /* Check the .debug_frame section for unwinding info */
357 if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { 358 if (ret < 0 &&
359 !read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
358 int fd = dso__data_get_fd(map->dso, ui->machine); 360 int fd = dso__data_get_fd(map->dso, ui->machine);
359 int is_exec = elf_is_exec(fd, map->dso->name); 361 int is_exec = elf_is_exec(fd, map->dso->name);
360 unw_word_t base = is_exec ? 0 : map->start; 362 unw_word_t base = is_exec ? 0 : map->start;
363 const char *symfile;
361 364
362 if (fd >= 0) 365 if (fd >= 0)
363 dso__data_put_fd(map->dso); 366 dso__data_put_fd(map->dso);
364 367
368 symfile = map->dso->symsrc_filename ?: map->dso->name;
369
365 memset(&di, 0, sizeof(di)); 370 memset(&di, 0, sizeof(di));
366 if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name, 371 if (dwarf_find_debug_frame(0, &di, ip, base, symfile,
367 map->start, map->end)) 372 map->start, map->end))
368 return dwarf_search_unwind_table(as, ip, &di, pi, 373 return dwarf_search_unwind_table(as, ip, &di, pi,
369 need_unwind_info, arg); 374 need_unwind_info, arg);
370 } 375 }
371#endif 376#endif
372 377
373 return -EINVAL; 378 return ret;
374} 379}
375 380
376static int access_fpreg(unw_addr_space_t __maybe_unused as, 381static int access_fpreg(unw_addr_space_t __maybe_unused as,
@@ -461,7 +466,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
461 if (ret) { 466 if (ret) {
462 pr_debug("unwind: access_mem %p not inside range" 467 pr_debug("unwind: access_mem %p not inside range"
463 " 0x%" PRIx64 "-0x%" PRIx64 "\n", 468 " 0x%" PRIx64 "-0x%" PRIx64 "\n",
464 (void *) addr, start, end); 469 (void *) (uintptr_t) addr, start, end);
465 *valp = 0; 470 *valp = 0;
466 return ret; 471 return ret;
467 } 472 }
@@ -471,7 +476,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
471 offset = addr - start; 476 offset = addr - start;
472 *valp = *(unw_word_t *)&stack->data[offset]; 477 *valp = *(unw_word_t *)&stack->data[offset];
473 pr_debug("unwind: access_mem addr %p val %lx, offset %d\n", 478 pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
474 (void *) addr, (unsigned long)*valp, offset); 479 (void *) (uintptr_t) addr, (unsigned long)*valp, offset);
475 return 0; 480 return 0;
476} 481}
477 482
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index 4007aca8e0ca..6adfa18cdd4e 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -50,6 +50,11 @@ void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN)
50 die_routine = routine; 50 die_routine = routine;
51} 51}
52 52
53void set_warning_routine(void (*routine)(const char *err, va_list params))
54{
55 warn_routine = routine;
56}
57
53void usage(const char *err) 58void usage(const char *err)
54{ 59{
55 usage_routine(err); 60 usage_routine(err);
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index c2cd9bf2348b..cd12c25e4ea4 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -17,7 +17,7 @@
17#include "callchain.h" 17#include "callchain.h"
18 18
19struct callchain_param callchain_param = { 19struct callchain_param callchain_param = {
20 .mode = CHAIN_GRAPH_REL, 20 .mode = CHAIN_GRAPH_ABS,
21 .min_percent = 0.5, 21 .min_percent = 0.5,
22 .order = ORDER_CALLEE, 22 .order = ORDER_CALLEE,
23 .key = CCKEY_FUNCTION 23 .key = CCKEY_FUNCTION
@@ -34,9 +34,6 @@ bool test_attr__enabled;
34bool perf_host = true; 34bool perf_host = true;
35bool perf_guest = false; 35bool perf_guest = false;
36 36
37char tracing_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing";
38char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
39
40void event_attr_init(struct perf_event_attr *attr) 37void event_attr_init(struct perf_event_attr *attr)
41{ 38{
42 if (!perf_host) 39 if (!perf_host)
@@ -390,73 +387,6 @@ void set_term_quiet_input(struct termios *old)
390 tcsetattr(0, TCSANOW, &tc); 387 tcsetattr(0, TCSANOW, &tc);
391} 388}
392 389
393static void set_tracing_events_path(const char *tracing, const char *mountpoint)
394{
395 snprintf(tracing_path, sizeof(tracing_path), "%s/%s",
396 mountpoint, tracing);
397 snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s",
398 mountpoint, tracing, "events");
399}
400
401static const char *__perf_tracefs_mount(const char *mountpoint)
402{
403 const char *mnt;
404
405 mnt = tracefs_mount(mountpoint);
406 if (!mnt)
407 return NULL;
408
409 set_tracing_events_path("", mnt);
410
411 return mnt;
412}
413
414static const char *__perf_debugfs_mount(const char *mountpoint)
415{
416 const char *mnt;
417
418 mnt = debugfs_mount(mountpoint);
419 if (!mnt)
420 return NULL;
421
422 set_tracing_events_path("tracing/", mnt);
423
424 return mnt;
425}
426
427const char *perf_debugfs_mount(const char *mountpoint)
428{
429 const char *mnt;
430
431 mnt = __perf_tracefs_mount(mountpoint);
432 if (mnt)
433 return mnt;
434
435 mnt = __perf_debugfs_mount(mountpoint);
436
437 return mnt;
438}
439
440void perf_debugfs_set_path(const char *mntpt)
441{
442 set_tracing_events_path("tracing/", mntpt);
443}
444
445char *get_tracing_file(const char *name)
446{
447 char *file;
448
449 if (asprintf(&file, "%s/%s", tracing_path, name) < 0)
450 return NULL;
451
452 return file;
453}
454
455void put_tracing_file(char *file)
456{
457 free(file);
458}
459
460int parse_nsec_time(const char *str, u64 *ptime) 390int parse_nsec_time(const char *str, u64 *ptime)
461{ 391{
462 u64 time_sec, time_nsec; 392 u64 time_sec, time_nsec;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 291be1d84bc3..4cfb913aa9e0 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -74,8 +74,7 @@
74#include <linux/magic.h> 74#include <linux/magic.h>
75#include <linux/types.h> 75#include <linux/types.h>
76#include <sys/ttydefaults.h> 76#include <sys/ttydefaults.h>
77#include <api/fs/debugfs.h> 77#include <api/fs/tracing_path.h>
78#include <api/fs/tracefs.h>
79#include <termios.h> 78#include <termios.h>
80#include <linux/bitops.h> 79#include <linux/bitops.h>
81#include <termios.h> 80#include <termios.h>
@@ -83,12 +82,6 @@
83extern const char *graph_line; 82extern const char *graph_line;
84extern const char *graph_dotted_line; 83extern const char *graph_dotted_line;
85extern char buildid_dir[]; 84extern char buildid_dir[];
86extern char tracing_path[];
87extern char tracing_events_path[];
88extern void perf_debugfs_set_path(const char *mountpoint);
89const char *perf_debugfs_mount(const char *mountpoint);
90char *get_tracing_file(const char *name);
91void put_tracing_file(char *file);
92 85
93/* On most systems <limits.h> would have given us this, but 86/* On most systems <limits.h> would have given us this, but
94 * not on some systems (e.g. GNU/Hurd). 87 * not on some systems (e.g. GNU/Hurd).
@@ -152,6 +145,7 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)))
152 145
153 146
154extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); 147extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
148extern void set_warning_routine(void (*routine)(const char *err, va_list params));
155 149
156extern int prefixcmp(const char *str, const char *prefix); 150extern int prefixcmp(const char *str, const char *prefix);
157extern void set_buildid_dir(const char *dir); 151extern void set_buildid_dir(const char *dir);
@@ -321,6 +315,8 @@ struct symbol;
321extern bool srcline_full_filename; 315extern bool srcline_full_filename;
322char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, 316char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
323 bool show_sym); 317 bool show_sym);
318char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
319 bool show_sym, bool unwind_inlines);
324void free_srcline(char *srcline); 320void free_srcline(char *srcline);
325 321
326int filename__read_str(const char *filename, char **buf, size_t *sizep); 322int filename__read_str(const char *filename, char **buf, size_t *sizep);
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index fbe2dbff1e21..f6483609ebc2 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -75,7 +75,7 @@ usage () {
75while test $# -gt 0 75while test $# -gt 0
76do 76do
77 case "$1" in 77 case "$1" in
78 --bootargs) 78 --bootargs|--bootarg)
79 checkarg --bootargs "(list of kernel boot arguments)" "$#" "$2" '.*' '^--' 79 checkarg --bootargs "(list of kernel boot arguments)" "$#" "$2" '.*' '^--'
80 TORTURE_BOOTARGS="$2" 80 TORTURE_BOOTARGS="$2"
81 shift 81 shift
@@ -88,7 +88,7 @@ do
88 --buildonly) 88 --buildonly)
89 TORTURE_BUILDONLY=1 89 TORTURE_BUILDONLY=1
90 ;; 90 ;;
91 --configs) 91 --configs|--config)
92 checkarg --configs "(list of config files)" "$#" "$2" '^[^/]*$' '^--' 92 checkarg --configs "(list of config files)" "$#" "$2" '^[^/]*$' '^--'
93 configs="$2" 93 configs="$2"
94 shift 94 shift
@@ -134,7 +134,7 @@ do
134 --no-initrd) 134 --no-initrd)
135 TORTURE_INITRD=""; export TORTURE_INITRD 135 TORTURE_INITRD=""; export TORTURE_INITRD
136 ;; 136 ;;
137 --qemu-args) 137 --qemu-args|--qemu-arg)
138 checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error' 138 checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error'
139 TORTURE_QEMU_ARG="$2" 139 TORTURE_QEMU_ARG="$2"
140 shift 140 shift
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFLIST b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
index 6910b7370761..b9611c523723 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
@@ -1,4 +1,6 @@
1LOCK01 1LOCK01
2LOCK02 2LOCK02
3LOCK03 3LOCK03
4LOCK04 \ No newline at end of file 4LOCK04
5LOCK05
6LOCK06
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK05 b/tools/testing/selftests/rcutorture/configs/lock/LOCK05
new file mode 100644
index 000000000000..1d1da1477fc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK05
@@ -0,0 +1,6 @@
1CONFIG_SMP=y
2CONFIG_NR_CPUS=4
3CONFIG_HOTPLUG_CPU=y
4CONFIG_PREEMPT_NONE=n
5CONFIG_PREEMPT_VOLUNTARY=n
6CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot
new file mode 100644
index 000000000000..8ac37307c987
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot
@@ -0,0 +1 @@
locktorture.torture_type=rtmutex_lock
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK06 b/tools/testing/selftests/rcutorture/configs/lock/LOCK06
new file mode 100644
index 000000000000..1d1da1477fc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK06
@@ -0,0 +1,6 @@
1CONFIG_SMP=y
2CONFIG_NR_CPUS=4
3CONFIG_HOTPLUG_CPU=y
4CONFIG_PREEMPT_NONE=n
5CONFIG_PREEMPT_VOLUNTARY=n
6CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK06.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK06.boot
new file mode 100644
index 000000000000..f92219cd4ad9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK06.boot
@@ -0,0 +1 @@
locktorture.torture_type=percpu_rwsem_lock
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 89a3f44bf355..4a1be1b75a7f 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -8,7 +8,7 @@ LDFLAGS += -lrt -lpthread
8TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ 8TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \
9 inconsistency-check raw_skew threadtest rtctest 9 inconsistency-check raw_skew threadtest rtctest
10 10
11TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex change_skew \ 11TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \
12 skew_consistency clocksource-switch leap-a-day \ 12 skew_consistency clocksource-switch leap-a-day \
13 leapcrash set-tai set-2038 13 leapcrash set-tai set-2038
14 14
@@ -24,6 +24,7 @@ include ../lib.mk
24run_destructive_tests: run_tests 24run_destructive_tests: run_tests
25 ./alarmtimer-suspend 25 ./alarmtimer-suspend
26 ./valid-adjtimex 26 ./valid-adjtimex
27 ./adjtick
27 ./change_skew 28 ./change_skew
28 ./skew_consistency 29 ./skew_consistency
29 ./clocksource-switch 30 ./clocksource-switch
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
new file mode 100644
index 000000000000..9887fd538fec
--- /dev/null
+++ b/tools/testing/selftests/timers/adjtick.c
@@ -0,0 +1,221 @@
1/* adjtimex() tick adjustment test
2 * by: John Stultz <john.stultz@linaro.org>
3 * (C) Copyright Linaro Limited 2015
4 * Licensed under the GPLv2
5 *
6 * To build:
7 * $ gcc adjtick.c -o adjtick -lrt
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation, either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19#include <stdio.h>
20#include <unistd.h>
21#include <stdlib.h>
22#include <sys/time.h>
23#include <sys/timex.h>
24#include <time.h>
25
26#ifdef KTEST
27#include "../kselftest.h"
28#else
29static inline int ksft_exit_pass(void)
30{
31 exit(0);
32}
33static inline int ksft_exit_fail(void)
34{
35 exit(1);
36}
37#endif
38
39#define CLOCK_MONOTONIC_RAW 4
40
41#define NSEC_PER_SEC 1000000000LL
42#define USEC_PER_SEC 1000000
43
44#define MILLION 1000000
45
46long systick;
47
48long long llabs(long long val)
49{
50 if (val < 0)
51 val = -val;
52 return val;
53}
54
55unsigned long long ts_to_nsec(struct timespec ts)
56{
57 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
58}
59
60struct timespec nsec_to_ts(long long ns)
61{
62 struct timespec ts;
63
64 ts.tv_sec = ns/NSEC_PER_SEC;
65 ts.tv_nsec = ns%NSEC_PER_SEC;
66
67 return ts;
68}
69
70long long diff_timespec(struct timespec start, struct timespec end)
71{
72 long long start_ns, end_ns;
73
74 start_ns = ts_to_nsec(start);
75 end_ns = ts_to_nsec(end);
76
77 return end_ns - start_ns;
78}
79
80void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw)
81{
82 struct timespec start, mid, end;
83 long long diff = 0, tmp;
84 int i;
85
86 clock_gettime(CLOCK_MONOTONIC, mon);
87 clock_gettime(CLOCK_MONOTONIC_RAW, raw);
88
89 /* Try to get a more tightly bound pairing */
90 for (i = 0; i < 3; i++) {
91 long long newdiff;
92
93 clock_gettime(CLOCK_MONOTONIC, &start);
94 clock_gettime(CLOCK_MONOTONIC_RAW, &mid);
95 clock_gettime(CLOCK_MONOTONIC, &end);
96
97 newdiff = diff_timespec(start, end);
98 if (diff == 0 || newdiff < diff) {
99 diff = newdiff;
100 *raw = mid;
101 tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
102 *mon = nsec_to_ts(tmp);
103 }
104 }
105}
106
107long long get_ppm_drift(void)
108{
109 struct timespec mon_start, raw_start, mon_end, raw_end;
110 long long delta1, delta2, eppm;
111
112 get_monotonic_and_raw(&mon_start, &raw_start);
113
114 sleep(15);
115
116 get_monotonic_and_raw(&mon_end, &raw_end);
117
118 delta1 = diff_timespec(mon_start, mon_end);
119 delta2 = diff_timespec(raw_start, raw_end);
120
121 eppm = (delta1*MILLION)/delta2 - MILLION;
122
123 return eppm;
124}
125
126int check_tick_adj(long tickval)
127{
128 long long eppm, ppm;
129 struct timex tx1;
130
131 tx1.modes = ADJ_TICK;
132 tx1.modes |= ADJ_OFFSET;
133 tx1.modes |= ADJ_FREQUENCY;
134 tx1.modes |= ADJ_STATUS;
135
136 tx1.status = STA_PLL;
137 tx1.offset = 0;
138 tx1.freq = 0;
139 tx1.tick = tickval;
140
141 adjtimex(&tx1);
142
143 sleep(1);
144
145 ppm = ((long long)tickval * MILLION)/systick - MILLION;
146 printf("Estimating tick (act: %ld usec, %lld ppm): ", tickval, ppm);
147
148 eppm = get_ppm_drift();
149 printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
150
151 tx1.modes = 0;
152 adjtimex(&tx1);
153
154 if (tx1.offset || tx1.freq || tx1.tick != tickval) {
155 printf(" [ERROR]\n");
156 printf("\tUnexpected adjtimex return values, make sure ntpd is not running.\n");
157 return -1;
158 }
159
160 /*
161 * Here we use 100ppm difference as an error bound.
162 * We likely should see better, but some coarse clocksources
 163 * cannot match the HZ tick size accurately, so we have an
 164 * internal correction factor that doesn't scale exactly
165 * with the adjustment, resulting in > 10ppm error during
166 * a 10% adjustment. 100ppm also gives us more breathing
167 * room for interruptions during the measurement.
168 */
169 if (llabs(eppm - ppm) > 100) {
170 printf(" [FAILED]\n");
171 return -1;
172 }
173 printf(" [OK]\n");
174
175 return 0;
176}
177
178int main(int argv, char **argc)
179{
180 struct timespec raw;
181 long tick, max, interval, err;
182 struct timex tx1;
183
184 err = 0;
185 setbuf(stdout, NULL);
186
187 if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
188 printf("ERR: NO CLOCK_MONOTONIC_RAW\n");
189 return -1;
190 }
191
192 printf("Each iteration takes about 15 seconds\n");
193
194 systick = sysconf(_SC_CLK_TCK);
195 systick = USEC_PER_SEC/sysconf(_SC_CLK_TCK);
196 max = systick/10; /* +/- 10% */
197 interval = max/4; /* in 4 steps each side */
198
199 for (tick = (systick - max); tick < (systick + max); tick += interval) {
200 if (check_tick_adj(tick)) {
201 err = 1;
202 break;
203 }
204 }
205
206 /* Reset things to zero */
207 tx1.modes = ADJ_TICK;
208 tx1.modes |= ADJ_OFFSET;
209 tx1.modes |= ADJ_FREQUENCY;
210
211 tx1.offset = 0;
212 tx1.freq = 0;
213 tx1.tick = systick;
214
215 adjtimex(&tx1);
216
217 if (err)
218 return ksft_exit_fail();
219
220 return ksft_exit_pass();
221}
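
The new adjtick test requests a tick length via ADJ_TICK, converts it to an expected frequency offset in ppm, and checks that the measured CLOCK_MONOTONIC versus CLOCK_MONOTONIC_RAW drift agrees within 100 ppm. The sketch below works the same arithmetic with concrete numbers; the HZ=100 base tick and the 15-second deltas are assumed values chosen for illustration.

/* Hedged sketch of the adjtick error math with example numbers. */
#include <stdio.h>

#define MILLION 1000000LL

int main(void)
{
	long long systick = 10000;		/* USEC_PER_SEC / sysconf(_SC_CLK_TCK), assuming HZ=100 */
	long long tickval = 10100;		/* +1% tick adjustment requested via ADJ_TICK */
	long long delta_mono = 15150000000LL;	/* 15.15 s elapsed on CLOCK_MONOTONIC (assumed) */
	long long delta_raw  = 15000000000LL;	/* 15.00 s elapsed on CLOCK_MONOTONIC_RAW (assumed) */

	long long ppm  = tickval * MILLION / systick - MILLION;	 /* requested: +10000 ppm */
	long long eppm = delta_mono * MILLION / delta_raw - MILLION;	 /* measured:  +10000 ppm */

	printf("requested %lld ppm, measured %lld ppm, diff %lld ppm (bound is 100)\n",
	       ppm, eppm, ppm > eppm ? ppm - eppm : eppm - ppm);
	return 0;
}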
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 29089b24d18b..389701f59940 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -4,8 +4,8 @@ include ../lib.mk
4 4
5.PHONY: all all_32 all_64 warn_32bit_failure clean 5.PHONY: all all_32 all_64 warn_32bit_failure clean
6 6
7TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt 7TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt ptrace_syscall
8TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn 8TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso
9 9
10TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) 10TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
11BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32) 11BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
@@ -60,3 +60,5 @@ endif
60 60
61# Some tests have additional dependencies. 61# Some tests have additional dependencies.
62sysret_ss_attrs_64: thunks.S 62sysret_ss_attrs_64: thunks.S
63ptrace_syscall_32: raw_syscall_helper_32.S
64test_syscall_vdso_32: thunks_32.S
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index 421c607a8856..d075ea0e5ca1 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -230,5 +230,9 @@ int main(void)
230 } 230 }
231 clearhandler(SIGSEGV); 231 clearhandler(SIGSEGV);
232 232
233 /* Make sure nothing explodes if we fork. */
234 if (fork() > 0)
235 return 0;
236
233 return (nerrs == 0 ? 0 : 1); 237 return (nerrs == 0 ? 0 : 1);
234} 238}
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
new file mode 100644
index 000000000000..5105b49cd8aa
--- /dev/null
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -0,0 +1,294 @@
1#define _GNU_SOURCE
2
3#include <sys/ptrace.h>
4#include <sys/types.h>
5#include <sys/wait.h>
6#include <sys/syscall.h>
7#include <sys/user.h>
8#include <unistd.h>
9#include <errno.h>
10#include <stddef.h>
11#include <stdio.h>
12#include <err.h>
13#include <string.h>
14#include <asm/ptrace-abi.h>
15#include <sys/auxv.h>
16
17/* Bitness-agnostic defines for user_regs_struct fields. */
18#ifdef __x86_64__
19# define user_syscall_nr orig_rax
20# define user_arg0 rdi
21# define user_arg1 rsi
22# define user_arg2 rdx
23# define user_arg3 r10
24# define user_arg4 r8
25# define user_arg5 r9
26# define user_ip rip
27# define user_ax rax
28#else
29# define user_syscall_nr orig_eax
30# define user_arg0 ebx
31# define user_arg1 ecx
32# define user_arg2 edx
33# define user_arg3 esi
34# define user_arg4 edi
35# define user_arg5 ebp
36# define user_ip eip
37# define user_ax eax
38#endif
39
40static int nerrs = 0;
41
42struct syscall_args32 {
43 uint32_t nr, arg0, arg1, arg2, arg3, arg4, arg5;
44};
45
46#ifdef __i386__
47extern void sys32_helper(struct syscall_args32 *, void *);
48extern void int80_and_ret(void);
49#endif
50
51/*
52 * Helper to invoke int80 with controlled regs and capture the final regs.
53 */
54static void do_full_int80(struct syscall_args32 *args)
55{
56#ifdef __x86_64__
57 register unsigned long bp asm("bp") = args->arg5;
58 asm volatile ("int $0x80"
59 : "+a" (args->nr),
60 "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
61 "+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
62 args->arg5 = bp;
63#else
64 sys32_helper(args, int80_and_ret);
65#endif
66}
67
68#ifdef __i386__
69static void (*vsyscall32)(void);
70
71/*
72 * Nasty helper to invoke AT_SYSINFO (i.e. __kernel_vsyscall) with
73 * controlled regs and capture the final regs. This is so nasty that it
74 * crashes my copy of gdb :)
75 */
76static void do_full_vsyscall32(struct syscall_args32 *args)
77{
78 sys32_helper(args, vsyscall32);
79}
80#endif
81
82static siginfo_t wait_trap(pid_t chld)
83{
84 siginfo_t si;
85 if (waitid(P_PID, chld, &si, WEXITED|WSTOPPED) != 0)
86 err(1, "waitid");
87 if (si.si_pid != chld)
88		errx(1, "got unexpected pid in event");
89	if (si.si_code != CLD_TRAPPED)
90		errx(1, "got unexpected event type %d", si.si_code);
91 return si;
92}
93
94static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
95 int flags)
96{
97 struct sigaction sa;
98 memset(&sa, 0, sizeof(sa));
99 sa.sa_sigaction = handler;
100 sa.sa_flags = SA_SIGINFO | flags;
101 sigemptyset(&sa.sa_mask);
102 if (sigaction(sig, &sa, 0))
103 err(1, "sigaction");
104}
105
106static void clearhandler(int sig)
107{
108 struct sigaction sa;
109 memset(&sa, 0, sizeof(sa));
110 sa.sa_handler = SIG_DFL;
111 sigemptyset(&sa.sa_mask);
112 if (sigaction(sig, &sa, 0))
113 err(1, "sigaction");
114}
115
116#ifdef __x86_64__
117# define REG_BP REG_RBP
118#else
119# define REG_BP REG_EBP
120#endif
121
122static void empty_handler(int sig, siginfo_t *si, void *ctx_void)
123{
124}
125
126static void test_sys32_regs(void (*do_syscall)(struct syscall_args32 *))
127{
128 struct syscall_args32 args = {
129 .nr = 224, /* gettid */
130 .arg0 = 10, .arg1 = 11, .arg2 = 12,
131 .arg3 = 13, .arg4 = 14, .arg5 = 15,
132 };
133
134 do_syscall(&args);
135
136 if (args.nr != getpid() ||
137 args.arg0 != 10 || args.arg1 != 11 || args.arg2 != 12 ||
138 args.arg3 != 13 || args.arg4 != 14 || args.arg5 != 15) {
139		printf("[FAIL]\tgetpid() failed to preserve regs\n");
140 nerrs++;
141 } else {
142 printf("[OK]\tgetpid() preserves regs\n");
143 }
144
145 sethandler(SIGUSR1, empty_handler, 0);
146
147 args.nr = 37; /* kill */
148 args.arg0 = getpid();
149 args.arg1 = SIGUSR1;
150 do_syscall(&args);
151 if (args.nr != 0 ||
152 args.arg0 != getpid() || args.arg1 != SIGUSR1 || args.arg2 != 12 ||
153 args.arg3 != 13 || args.arg4 != 14 || args.arg5 != 15) {
154		printf("[FAIL]\tkill(getpid(), SIGUSR1) failed to preserve regs\n");
155 nerrs++;
156 } else {
157 printf("[OK]\tkill(getpid(), SIGUSR1) preserves regs\n");
158 }
159 clearhandler(SIGUSR1);
160}
161
162static void test_ptrace_syscall_restart(void)
163{
164 printf("[RUN]\tptrace-induced syscall restart\n");
165 pid_t chld = fork();
166 if (chld < 0)
167 err(1, "fork");
168
169 if (chld == 0) {
170 if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
171 err(1, "PTRACE_TRACEME");
172
173 printf("\tChild will make one syscall\n");
174 raise(SIGSTOP);
175
176 syscall(SYS_gettid, 10, 11, 12, 13, 14, 15);
177 _exit(0);
178 }
179
180 int status;
181
182 /* Wait for SIGSTOP. */
183 if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
184 err(1, "waitpid");
185
186 struct user_regs_struct regs;
187
188 printf("[RUN]\tSYSEMU\n");
189 if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
190		err(1, "PTRACE_SYSEMU");
191 wait_trap(chld);
192
193 if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
194 err(1, "PTRACE_GETREGS");
195
196 if (regs.user_syscall_nr != SYS_gettid ||
197 regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
198 regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
199 regs.user_arg4 != 14 || regs.user_arg5 != 15) {
200 printf("[FAIL]\tInitial args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
201 nerrs++;
202 } else {
203 printf("[OK]\tInitial nr and args are correct\n");
204 }
205
206 printf("[RUN]\tRestart the syscall (ip = 0x%lx)\n",
207 (unsigned long)regs.user_ip);
208
209 /*
210 * This does exactly what it appears to do if syscall is int80 or
211 * SYSCALL64. For SYSCALL32 or SYSENTER, though, this is highly
212 * magical. It needs to work so that ptrace and syscall restart
213 * work as expected.
214 */
215 regs.user_ax = regs.user_syscall_nr;
216 regs.user_ip -= 2;
217 if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
218 err(1, "PTRACE_SETREGS");
219
220 if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
221		err(1, "PTRACE_SYSEMU");
222 wait_trap(chld);
223
224 if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
225 err(1, "PTRACE_GETREGS");
226
227 if (regs.user_syscall_nr != SYS_gettid ||
228 regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
229 regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
230 regs.user_arg4 != 14 || regs.user_arg5 != 15) {
231 printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
232 nerrs++;
233 } else {
234 printf("[OK]\tRestarted nr and args are correct\n");
235 }
236
237 printf("[RUN]\tChange nr and args and restart the syscall (ip = 0x%lx)\n",
238 (unsigned long)regs.user_ip);
239
240 regs.user_ax = SYS_getpid;
241 regs.user_arg0 = 20;
242 regs.user_arg1 = 21;
243 regs.user_arg2 = 22;
244 regs.user_arg3 = 23;
245 regs.user_arg4 = 24;
246 regs.user_arg5 = 25;
247 regs.user_ip -= 2;
248
249 if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
250 err(1, "PTRACE_SETREGS");
251
252 if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
253		err(1, "PTRACE_SYSEMU");
254 wait_trap(chld);
255
256 if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
257 err(1, "PTRACE_GETREGS");
258
259 if (regs.user_syscall_nr != SYS_getpid ||
260 regs.user_arg0 != 20 || regs.user_arg1 != 21 || regs.user_arg2 != 22 ||
261 regs.user_arg3 != 23 || regs.user_arg4 != 24 || regs.user_arg5 != 25) {
262 printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n", (unsigned long)regs.user_syscall_nr, (unsigned long)regs.user_arg0, (unsigned long)regs.user_arg1, (unsigned long)regs.user_arg2, (unsigned long)regs.user_arg3, (unsigned long)regs.user_arg4, (unsigned long)regs.user_arg5);
263 nerrs++;
264 } else {
265 printf("[OK]\tReplacement nr and args are correct\n");
266 }
267
268 if (ptrace(PTRACE_CONT, chld, 0, 0) != 0)
269 err(1, "PTRACE_CONT");
270 if (waitpid(chld, &status, 0) != chld)
271 err(1, "waitpid");
272 if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
273 printf("[FAIL]\tChild failed\n");
274 nerrs++;
275 } else {
276 printf("[OK]\tChild exited cleanly\n");
277 }
278}
279
280int main()
281{
282 printf("[RUN]\tCheck int80 return regs\n");
283 test_sys32_regs(do_full_int80);
284
285#if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16)
286 vsyscall32 = (void *)getauxval(AT_SYSINFO);
287 printf("[RUN]\tCheck AT_SYSINFO return regs\n");
288 test_sys32_regs(do_full_vsyscall32);
289#endif
290
291 test_ptrace_syscall_restart();
292
293 return 0;
294}
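
For contrast with the PTRACE_SYSEMU plus register-rewrite sequence above (suppress the syscall, reload AX with the number, rewind the IP by two bytes), here is a minimal sketch that only observes a child's syscall numbers using plain PTRACE_SYSCALL stops. It is an illustration, not part of the patch, and assumes an x86_64 host with glibc's struct user_regs_struct:

#define _GNU_SOURCE
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <err.h>

int main(void)
{
	pid_t chld = fork();

	if (chld < 0)
		err(1, "fork");
	if (chld == 0) {
		if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
			err(1, "PTRACE_TRACEME");
		raise(SIGSTOP);
		getpid();	/* a syscall for the parent to observe */
		_exit(0);
	}

	waitpid(chld, 0, 0);	/* child stops itself with SIGSTOP */

	for (;;) {
		struct user_regs_struct regs;
		int status;

		if (ptrace(PTRACE_SYSCALL, chld, 0, 0) != 0)
			err(1, "PTRACE_SYSCALL");
		if (waitpid(chld, &status, 0) != chld)
			err(1, "waitpid");
		if (WIFEXITED(status))
			break;
		/* Each syscall produces an entry stop and an exit stop. */
		if (ptrace(PTRACE_GETREGS, chld, 0, &regs) == 0)
			printf("stop at syscall nr %llu\n",
			       (unsigned long long)regs.orig_rax);
	}
	return 0;
}

With PTRACE_SYSCALL the syscall actually runs; the test above uses PTRACE_SYSEMU so the syscall is suppressed and can then be re-issued by rewinding the instruction pointer.
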
diff --git a/tools/testing/selftests/x86/raw_syscall_helper_32.S b/tools/testing/selftests/x86/raw_syscall_helper_32.S
new file mode 100644
index 000000000000..534e71e35c6a
--- /dev/null
+++ b/tools/testing/selftests/x86/raw_syscall_helper_32.S
@@ -0,0 +1,46 @@
1.global sys32_helper
2sys32_helper:
3 /* Args: syscall_args_32*, function pointer */
4 pushl %ebp
5 pushl %ebx
6 pushl %esi
7 pushl %edi
8 movl 5*4(%esp), %eax /* pointer to args struct */
9
10 movl 1*4(%eax), %ebx
11 movl 2*4(%eax), %ecx
12 movl 3*4(%eax), %edx
13 movl 4*4(%eax), %esi
14 movl 5*4(%eax), %edi
15 movl 6*4(%eax), %ebp
16 movl 0*4(%eax), %eax
17
18 call *(6*4)(%esp) /* Do the syscall */
19
20 /* Now we need to recover without losing any reg values */
21 pushl %eax
22 movl 6*4(%esp), %eax
23 popl 0*4(%eax)
24 movl %ebx, 1*4(%eax)
25 movl %ecx, 2*4(%eax)
26 movl %edx, 3*4(%eax)
27 movl %esi, 4*4(%eax)
28 movl %edi, 5*4(%eax)
29 movl %ebp, 6*4(%eax)
30
31 popl %edi
32 popl %esi
33 popl %ebx
34 popl %ebp
35 ret
36
37 .type sys32_helper, @function
38 .size sys32_helper, .-sys32_helper
39
40.global int80_and_ret
41int80_and_ret:
42 int $0x80
43 ret
44
45 .type int80_and_ret, @function
46 .size int80_and_ret, .-int80_and_ret
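
The helper hard-codes the layout of the structure it is handed: offset 0*4 is the syscall number loaded into %eax, and offsets 1*4..6*4 are arg0..arg5 loaded into %ebx, %ecx, %edx, %esi, %edi and %ebp. A small compile-and-run sketch (illustrative only; it simply restates the struct syscall_args32 layout the caller in ptrace_syscall.c uses):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct syscall_args32 {
	uint32_t nr, arg0, arg1, arg2, arg3, arg4, arg5;
};

int main(void)
{
	/* These offsets must match the 0*4 .. 6*4 displacements above. */
	printf("nr   at byte %zu\n", offsetof(struct syscall_args32, nr));
	printf("arg0 at byte %zu\n", offsetof(struct syscall_args32, arg0));
	printf("arg5 at byte %zu\n", offsetof(struct syscall_args32, arg5));
	return 0;
}
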
diff --git a/tools/testing/selftests/x86/test_syscall_vdso.c b/tools/testing/selftests/x86/test_syscall_vdso.c
new file mode 100644
index 000000000000..40370354d4c1
--- /dev/null
+++ b/tools/testing/selftests/x86/test_syscall_vdso.c
@@ -0,0 +1,401 @@
1/*
2 * 32-bit syscall ABI conformance test.
3 *
4 * Copyright (c) 2015 Denys Vlasenko
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15/*
16 * Can be built statically:
17 * gcc -Os -Wall -static -m32 test_syscall_vdso.c thunks_32.S
18 */
19#undef _GNU_SOURCE
20#define _GNU_SOURCE 1
21#undef __USE_GNU
22#define __USE_GNU 1
23#include <unistd.h>
24#include <stdlib.h>
25#include <string.h>
26#include <stdio.h>
27#include <signal.h>
28#include <sys/types.h>
29#include <sys/select.h>
30#include <sys/time.h>
31#include <elf.h>
32#include <sys/ptrace.h>
33#include <sys/wait.h>
34
35#if !defined(__i386__)
36int main(int argc, char **argv, char **envp)
37{
38 printf("[SKIP]\tNot a 32-bit x86 userspace\n");
39 return 0;
40}
41#else
42
43long syscall_addr;
44long get_syscall(char **envp)
45{
46 Elf32_auxv_t *auxv;
47 while (*envp++ != NULL)
48 continue;
49 for (auxv = (void *)envp; auxv->a_type != AT_NULL; auxv++)
50 if (auxv->a_type == AT_SYSINFO)
51 return auxv->a_un.a_val;
52 printf("[WARN]\tAT_SYSINFO not supplied\n");
53 return 0;
54}
55
56asm (
57 " .pushsection .text\n"
58 " .global int80\n"
59 "int80:\n"
60 " int $0x80\n"
61 " ret\n"
62 " .popsection\n"
63);
64extern char int80;
65
66struct regs64 {
67 uint64_t rax, rbx, rcx, rdx;
68 uint64_t rsi, rdi, rbp, rsp;
69 uint64_t r8, r9, r10, r11;
70 uint64_t r12, r13, r14, r15;
71};
72struct regs64 regs64;
73int kernel_is_64bit;
74
75asm (
76 " .pushsection .text\n"
77 " .code64\n"
78 "get_regs64:\n"
79 " push %rax\n"
80 " mov $regs64, %eax\n"
81 " pop 0*8(%rax)\n"
82 " movq %rbx, 1*8(%rax)\n"
83 " movq %rcx, 2*8(%rax)\n"
84 " movq %rdx, 3*8(%rax)\n"
85 " movq %rsi, 4*8(%rax)\n"
86 " movq %rdi, 5*8(%rax)\n"
87 " movq %rbp, 6*8(%rax)\n"
88 " movq %rsp, 7*8(%rax)\n"
89 " movq %r8, 8*8(%rax)\n"
90 " movq %r9, 9*8(%rax)\n"
91 " movq %r10, 10*8(%rax)\n"
92 " movq %r11, 11*8(%rax)\n"
93 " movq %r12, 12*8(%rax)\n"
94 " movq %r13, 13*8(%rax)\n"
95 " movq %r14, 14*8(%rax)\n"
96 " movq %r15, 15*8(%rax)\n"
97 " ret\n"
98 "poison_regs64:\n"
99 " movq $0x7f7f7f7f, %r8\n"
100 " shl $32, %r8\n"
101 " orq $0x7f7f7f7f, %r8\n"
102 " movq %r8, %r9\n"
103 " movq %r8, %r10\n"
104 " movq %r8, %r11\n"
105 " movq %r8, %r12\n"
106 " movq %r8, %r13\n"
107 " movq %r8, %r14\n"
108 " movq %r8, %r15\n"
109 " ret\n"
110 " .code32\n"
111 " .popsection\n"
112);
113extern void get_regs64(void);
114extern void poison_regs64(void);
115extern unsigned long call64_from_32(void (*function)(void));
116void print_regs64(void)
117{
118 if (!kernel_is_64bit)
119 return;
120 printf("ax:%016llx bx:%016llx cx:%016llx dx:%016llx\n", regs64.rax, regs64.rbx, regs64.rcx, regs64.rdx);
121 printf("si:%016llx di:%016llx bp:%016llx sp:%016llx\n", regs64.rsi, regs64.rdi, regs64.rbp, regs64.rsp);
122 printf(" 8:%016llx 9:%016llx 10:%016llx 11:%016llx\n", regs64.r8 , regs64.r9 , regs64.r10, regs64.r11);
123 printf("12:%016llx 13:%016llx 14:%016llx 15:%016llx\n", regs64.r12, regs64.r13, regs64.r14, regs64.r15);
124}
125
126int check_regs64(void)
127{
128 int err = 0;
129 int num = 8;
130 uint64_t *r64 = &regs64.r8;
131
132 if (!kernel_is_64bit)
133 return 0;
134
135 do {
136 if (*r64 == 0x7f7f7f7f7f7f7f7fULL)
137 continue; /* register did not change */
138 if (syscall_addr != (long)&int80) {
139 /*
140 * Non-INT80 syscall entrypoints are allowed to clobber R8+ regs:
141 * either clear them to 0, or for R11, load EFLAGS.
142 */
143 if (*r64 == 0)
144 continue;
145 if (num == 11) {
146 printf("[NOTE]\tR11 has changed:%016llx - assuming clobbered by SYSRET insn\n", *r64);
147 continue;
148 }
149 } else {
150 /* INT80 syscall entrypoint can be used by
151 * 64-bit programs too, unlike SYSCALL/SYSENTER.
152 * Therefore it must preserve R12+
153 * (they are callee-saved registers in 64-bit C ABI).
154 *
155 * This was probably historically not intended,
156 * but R8..11 are clobbered (cleared to 0).
157 * IOW: they are the only registers which aren't
158 * preserved across INT80 syscall.
159 */
160 if (*r64 == 0 && num <= 11)
161 continue;
162 }
163 printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
164 err++;
165 } while (r64++, ++num < 16);
166
167 if (!err)
168 printf("[OK]\tR8..R15 did not leak kernel data\n");
169 return err;
170}
171
172int nfds;
173fd_set rfds;
174fd_set wfds;
175fd_set efds;
176struct timespec timeout;
177sigset_t sigmask;
178struct {
179 sigset_t *sp;
180 int sz;
181} sigmask_desc;
182
183void prep_args()
184{
185 nfds = 42;
186 FD_ZERO(&rfds);
187 FD_ZERO(&wfds);
188 FD_ZERO(&efds);
189 FD_SET(0, &rfds);
190 FD_SET(1, &wfds);
191 FD_SET(2, &efds);
192 timeout.tv_sec = 0;
193 timeout.tv_nsec = 123;
194 sigemptyset(&sigmask);
195 sigaddset(&sigmask, SIGINT);
196 sigaddset(&sigmask, SIGUSR2);
197 sigaddset(&sigmask, SIGRTMAX);
198 sigmask_desc.sp = &sigmask;
199 sigmask_desc.sz = 8; /* bytes */
200}
201
202static void print_flags(const char *name, unsigned long r)
203{
204 static const char *bitarray[] = {
205 "\n" ,"c\n" ,/* Carry Flag */
206 "0 " ,"1 " ,/* Bit 1 - always on */
207 "" ,"p " ,/* Parity Flag */
208 "0 " ,"3? " ,
209 "" ,"a " ,/* Auxiliary carry Flag */
210 "0 " ,"5? " ,
211 "" ,"z " ,/* Zero Flag */
212 "" ,"s " ,/* Sign Flag */
213 "" ,"t " ,/* Trap Flag */
214 "" ,"i " ,/* Interrupt Flag */
215 "" ,"d " ,/* Direction Flag */
216 "" ,"o " ,/* Overflow Flag */
217 "0 " ,"1 " ,/* I/O Privilege Level (2 bits) */
218 "0" ,"1" ,/* I/O Privilege Level (2 bits) */
219 "" ,"n " ,/* Nested Task */
220 "0 " ,"15? ",
221 "" ,"r " ,/* Resume Flag */
222 "" ,"v " ,/* Virtual Mode */
223 "" ,"ac " ,/* Alignment Check/Access Control */
224 "" ,"vif ",/* Virtual Interrupt Flag */
225 "" ,"vip ",/* Virtual Interrupt Pending */
226 "" ,"id " ,/* CPUID detection */
227 NULL
228 };
229 const char **bitstr;
230 int bit;
231
232 printf("%s=%016lx ", name, r);
233 bitstr = bitarray + 42;
234 bit = 21;
235 if ((r >> 22) != 0)
236 printf("(extra bits are set) ");
237 do {
238 if (bitstr[(r >> bit) & 1][0])
239 fputs(bitstr[(r >> bit) & 1], stdout);
240 bitstr -= 2;
241 bit--;
242 } while (bit >= 0);
243}
244
245int run_syscall(void)
246{
247 long flags, bad_arg;
248
249 prep_args();
250
251 if (kernel_is_64bit)
252 call64_from_32(poison_regs64);
253 /*print_regs64();*/
254
255 asm("\n"
256 /* Try 6-arg syscall: pselect. It should return quickly */
257 " push %%ebp\n"
258 " mov $308, %%eax\n" /* PSELECT */
259 " mov nfds, %%ebx\n" /* ebx arg1 */
260 " mov $rfds, %%ecx\n" /* ecx arg2 */
261 " mov $wfds, %%edx\n" /* edx arg3 */
262 " mov $efds, %%esi\n" /* esi arg4 */
263 " mov $timeout, %%edi\n" /* edi arg5 */
264 " mov $sigmask_desc, %%ebp\n" /* %ebp arg6 */
265 " push $0x200ed7\n" /* set almost all flags */
266 " popf\n" /* except TF, IOPL, NT, RF, VM, AC, VIF, VIP */
267 " call *syscall_addr\n"
268 /* Check that registers are not clobbered */
269 " pushf\n"
270 " pop %%eax\n"
271 " cld\n"
272 " cmp nfds, %%ebx\n" /* ebx arg1 */
273 " mov $1, %%ebx\n"
274 " jne 1f\n"
275 " cmp $rfds, %%ecx\n" /* ecx arg2 */
276 " mov $2, %%ebx\n"
277 " jne 1f\n"
278 " cmp $wfds, %%edx\n" /* edx arg3 */
279 " mov $3, %%ebx\n"
280 " jne 1f\n"
281 " cmp $efds, %%esi\n" /* esi arg4 */
282 " mov $4, %%ebx\n"
283 " jne 1f\n"
284 " cmp $timeout, %%edi\n" /* edi arg5 */
285 " mov $5, %%ebx\n"
286 " jne 1f\n"
287 " cmpl $sigmask_desc, %%ebp\n" /* %ebp arg6 */
288 " mov $6, %%ebx\n"
289 " jne 1f\n"
290 " mov $0, %%ebx\n"
291 "1:\n"
292 " pop %%ebp\n"
293 : "=a" (flags), "=b" (bad_arg)
294 :
295 : "cx", "dx", "si", "di"
296 );
297
298 if (kernel_is_64bit) {
299 memset(&regs64, 0x77, sizeof(regs64));
300 call64_from_32(get_regs64);
301 /*print_regs64();*/
302 }
303
304 /*
305 * On paravirt kernels, flags are not preserved across syscalls.
306 * Thus, we do not consider it a bug if some are changed.
307 * We just show ones which do.
308 */
309 if ((0x200ed7 ^ flags) != 0) {
310 print_flags("[WARN]\tFlags before", 0x200ed7);
311 print_flags("[WARN]\tFlags after", flags);
312 print_flags("[WARN]\tFlags change", (0x200ed7 ^ flags));
313 }
314
315 if (bad_arg) {
316 printf("[FAIL]\targ#%ld clobbered\n", bad_arg);
317 return 1;
318 }
319 printf("[OK]\tArguments are preserved across syscall\n");
320
321 return check_regs64();
322}
323
324int run_syscall_twice()
325{
326 int exitcode = 0;
327 long sv;
328
329 if (syscall_addr) {
330 printf("[RUN]\tExecuting 6-argument 32-bit syscall via VDSO\n");
331 exitcode = run_syscall();
332 }
333 sv = syscall_addr;
334 syscall_addr = (long)&int80;
335 printf("[RUN]\tExecuting 6-argument 32-bit syscall via INT 80\n");
336 exitcode += run_syscall();
337 syscall_addr = sv;
338 return exitcode;
339}
340
341void ptrace_me()
342{
343 pid_t pid;
344
345 fflush(NULL);
346 pid = fork();
347 if (pid < 0)
348 exit(1);
349 if (pid == 0) {
350 /* child */
351 if (ptrace(PTRACE_TRACEME, 0L, 0L, 0L) != 0)
352 exit(0);
353 raise(SIGSTOP);
354 return;
355 }
356 /* parent */
357 printf("[RUN]\tRunning tests under ptrace\n");
358 while (1) {
359 int status;
360 pid = waitpid(-1, &status, __WALL);
361 if (WIFEXITED(status))
362 exit(WEXITSTATUS(status));
363 if (WIFSIGNALED(status))
364 exit(WTERMSIG(status));
365 if (pid <= 0 || !WIFSTOPPED(status)) /* paranoia */
366 exit(255);
367 /*
368 * Note: we do not inject sig = WSTOPSIG(status).
369 * We probably should, but careful: do not inject SIGTRAP
370 * generated by syscall entry/exit stops.
371 * That kills the child.
372 */
373 ptrace(PTRACE_SYSCALL, pid, 0L, 0L /*sig*/);
374 }
375}
376
377int main(int argc, char **argv, char **envp)
378{
379 int exitcode = 0;
380 int cs;
381
382 asm("\n"
383 " movl %%cs, %%eax\n"
384 : "=a" (cs)
385 );
386 kernel_is_64bit = (cs == 0x23);
387 if (!kernel_is_64bit)
388 printf("[NOTE]\tNot a 64-bit kernel, won't test R8..R15 leaks\n");
389
390 /* This only works for non-static builds:
391 * syscall_addr = dlsym(dlopen("linux-gate.so.1", RTLD_NOW), "__kernel_vsyscall");
392 */
393 syscall_addr = get_syscall(envp);
394
395 exitcode += run_syscall_twice();
396 ptrace_me();
397 exitcode += run_syscall_twice();
398
399 return exitcode;
400}
401#endif	/* __i386__ */
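
test_syscall_vdso.c digs AT_SYSINFO (the address of __kernel_vsyscall) out of the auxiliary vector by walking past envp[] by hand, which keeps it buildable against glibc versions that predate getauxval(). Where glibc 2.16+ can be assumed, the same lookup is a one-liner; a hedged sketch (not used by the test above, and on x86 AT_SYSINFO is only supplied to 32-bit processes):

#include <sys/auxv.h>
#include <stdio.h>

int main(void)
{
	unsigned long vsyscall = getauxval(AT_SYSINFO);

	if (!vsyscall) {
		printf("[WARN]\tAT_SYSINFO not supplied\n");
		return 0;
	}
	printf("__kernel_vsyscall is at %#lx\n", vsyscall);
	return 0;
}
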
diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
new file mode 100644
index 000000000000..29b644bb9f2f
--- /dev/null
+++ b/tools/testing/selftests/x86/thunks_32.S
@@ -0,0 +1,55 @@
1/*
2 * thunks_32.S - assembly helpers for mixed-bitness code
3 * Copyright (c) 2015 Denys Vlasenko
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * These are little helpers that make it easier to switch bitness on
15 * the fly.
16 */
17
18 .text
19 .code32
20
21 .global call64_from_32
22	.type call64_from_32, @function
23
24 // 4(%esp): function to call
25call64_from_32:
26 // Fetch function address
27 mov 4(%esp), %eax
28
29 // Save registers which are callee-clobbered by 64-bit ABI
30 push %ecx
31 push %edx
32 push %esi
33 push %edi
34
35 // Switch to long mode
36 jmp $0x33,$1f
371: .code64
38
39 // Call the function
40 call *%rax
41
42 // Switch to compatibility mode
43 push $0x23 /* USER32_CS */
44 .code32; push $1f; .code64 /* hack: can't have X86_64_32S relocation in 32-bit ELF */
45 lretq
461: .code32
47
48 pop %edi
49 pop %esi
50 pop %edx
51 pop %ecx
52
53 ret
54
55.size call64_from_32, .-call64_from_32
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
new file mode 100644
index 000000000000..00a26a82fa98
--- /dev/null
+++ b/tools/testing/selftests/x86/unwind_vdso.c
@@ -0,0 +1,211 @@
1/*
2 * unwind_vdso.c - tests unwind info for AT_SYSINFO in the vDSO
3 * Copyright (c) 2014-2015 Andrew Lutomirski
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * This tests __kernel_vsyscall's unwind info.
15 */
16
17#define _GNU_SOURCE
18
19#include <features.h>
20#include <stdio.h>
21
22#if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
23
24int main()
25{
26 /* We need getauxval(). */
27 printf("[SKIP]\tGLIBC before 2.16 cannot compile this test\n");
28 return 0;
29}
30
31#else
32
33#include <sys/time.h>
34#include <stdlib.h>
35#include <syscall.h>
36#include <unistd.h>
37#include <string.h>
38#include <inttypes.h>
39#include <sys/mman.h>
40#include <signal.h>
41#include <sys/ucontext.h>
42#include <err.h>
43#include <stddef.h>
44#include <stdbool.h>
45#include <sys/ptrace.h>
46#include <sys/user.h>
47#include <sys/ucontext.h>
48#include <link.h>
49#include <sys/auxv.h>
50#include <dlfcn.h>
51#include <unwind.h>
52
53static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
54 int flags)
55{
56 struct sigaction sa;
57 memset(&sa, 0, sizeof(sa));
58 sa.sa_sigaction = handler;
59 sa.sa_flags = SA_SIGINFO | flags;
60 sigemptyset(&sa.sa_mask);
61 if (sigaction(sig, &sa, 0))
62 err(1, "sigaction");
63}
64
65#ifdef __x86_64__
66# define WIDTH "q"
67#else
68# define WIDTH "l"
69#endif
70
71static unsigned long get_eflags(void)
72{
73 unsigned long eflags;
74 asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
75 return eflags;
76}
77
78static void set_eflags(unsigned long eflags)
79{
80 asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
81 : : "rm" (eflags) : "flags");
82}
83
84#define X86_EFLAGS_TF (1UL << 8)
85
86static volatile sig_atomic_t nerrs;
87static unsigned long sysinfo;
88static bool got_sysinfo = false;
89static unsigned long return_address;
90
91struct unwind_state {
92 unsigned long ip; /* trap source */
93 int depth; /* -1 until we hit the trap source */
94};
95
96_Unwind_Reason_Code trace_fn(struct _Unwind_Context * ctx, void *opaque)
97{
98 struct unwind_state *state = opaque;
99 unsigned long ip = _Unwind_GetIP(ctx);
100
101 if (state->depth == -1) {
102 if (ip == state->ip)
103 state->depth = 0;
104 else
105 return _URC_NO_REASON; /* Not there yet */
106 }
107 printf("\t 0x%lx\n", ip);
108
109 if (ip == return_address) {
110 /* Here we are. */
111 unsigned long eax = _Unwind_GetGR(ctx, 0);
112 unsigned long ecx = _Unwind_GetGR(ctx, 1);
113 unsigned long edx = _Unwind_GetGR(ctx, 2);
114 unsigned long ebx = _Unwind_GetGR(ctx, 3);
115 unsigned long ebp = _Unwind_GetGR(ctx, 5);
116 unsigned long esi = _Unwind_GetGR(ctx, 6);
117 unsigned long edi = _Unwind_GetGR(ctx, 7);
118 bool ok = (eax == SYS_getpid || eax == getpid()) &&
119 ebx == 1 && ecx == 2 && edx == 3 &&
120 esi == 4 && edi == 5 && ebp == 6;
121
122 if (!ok)
123 nerrs++;
124 printf("[%s]\t NR = %ld, args = %ld, %ld, %ld, %ld, %ld, %ld\n",
125 (ok ? "OK" : "FAIL"),
126 eax, ebx, ecx, edx, esi, edi, ebp);
127
128 return _URC_NORMAL_STOP;
129 } else {
130 state->depth++;
131 return _URC_NO_REASON;
132 }
133}
134
135static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
136{
137 ucontext_t *ctx = (ucontext_t *)ctx_void;
138 struct unwind_state state;
139 unsigned long ip = ctx->uc_mcontext.gregs[REG_EIP];
140
141 if (!got_sysinfo && ip == sysinfo) {
142 got_sysinfo = true;
143
144 /* Find the return address. */
145 return_address = *(unsigned long *)(unsigned long)ctx->uc_mcontext.gregs[REG_ESP];
146
147 printf("\tIn vsyscall at 0x%lx, returning to 0x%lx\n",
148 ip, return_address);
149 }
150
151 if (!got_sysinfo)
152 return; /* Not there yet */
153
154 if (ip == return_address) {
155 ctx->uc_mcontext.gregs[REG_EFL] &= ~X86_EFLAGS_TF;
156 printf("\tVsyscall is done\n");
157 return;
158 }
159
160 printf("\tSIGTRAP at 0x%lx\n", ip);
161
162 state.ip = ip;
163 state.depth = -1;
164 _Unwind_Backtrace(trace_fn, &state);
165}
166
167int main()
168{
169 sysinfo = getauxval(AT_SYSINFO);
170 printf("\tAT_SYSINFO is 0x%lx\n", sysinfo);
171
172 Dl_info info;
173 if (!dladdr((void *)sysinfo, &info)) {
174 printf("[WARN]\tdladdr failed on AT_SYSINFO\n");
175 } else {
176		printf("[OK]\tAT_SYSINFO maps to %s, loaded at %p\n",
177 info.dli_fname, info.dli_fbase);
178 }
179
180 sethandler(SIGTRAP, sigtrap, 0);
181
182 syscall(SYS_getpid); /* Force symbol binding without TF set. */
183 printf("[RUN]\tSet TF and check a fast syscall\n");
184 set_eflags(get_eflags() | X86_EFLAGS_TF);
185 syscall(SYS_getpid, 1, 2, 3, 4, 5, 6);
186 if (!got_sysinfo) {
187 set_eflags(get_eflags() & ~X86_EFLAGS_TF);
188
189 /*
190 * The most likely cause of this is that you're on Debian or
191 * a Debian-based distro, you're missing libc6-i686, and you're
192 * affected by libc/19006 (https://sourceware.org/PR19006).
193 */
194 printf("[WARN]\tsyscall(2) didn't enter AT_SYSINFO\n");
195 }
196
197 if (get_eflags() & X86_EFLAGS_TF) {
198 printf("[FAIL]\tTF is still set\n");
199 nerrs++;
200 }
201
202 if (nerrs) {
203 printf("[FAIL]\tThere were errors\n");
204 return 1;
205 } else {
206 printf("[OK]\tAll is well\n");
207 return 0;
208 }
209}
210
211#endif /* New enough libc */
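
unwind_vdso.c drives libgcc's unwinder from a SIGTRAP handler while single-stepping through __kernel_vsyscall, which is what exercises the vDSO's unwind annotations. The unwinder API itself is ordinary and can be tried outside a signal handler; a minimal standalone sketch (illustrative, unrelated to the vDSO):

#include <unwind.h>
#include <stdio.h>

static _Unwind_Reason_Code trace_fn(struct _Unwind_Context *ctx, void *opaque)
{
	int *depth = opaque;

	printf("frame %d: ip = 0x%lx\n", (*depth)++,
	       (unsigned long)_Unwind_GetIP(ctx));
	return _URC_NO_REASON;	/* keep walking to the outermost frame */
}

/* Keep this out of line so its frame shows up in the trace. */
static void __attribute__((noinline)) leaf(void)
{
	int depth = 0;

	_Unwind_Backtrace(trace_fn, &depth);
}

int main(void)
{
	leaf();
	return 0;
}
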
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 7f73fa32a590..bcf5ec760eb9 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -42,7 +42,7 @@
42#include <sys/mman.h> 42#include <sys/mman.h>
43#include "../../include/uapi/linux/magic.h" 43#include "../../include/uapi/linux/magic.h"
44#include "../../include/uapi/linux/kernel-page-flags.h" 44#include "../../include/uapi/linux/kernel-page-flags.h"
45#include <api/fs/debugfs.h> 45#include <api/fs/fs.h>
46 46
47#ifndef MAX_PATH 47#ifndef MAX_PATH
48# define MAX_PATH 256 48# define MAX_PATH 256
@@ -188,7 +188,7 @@ static int kpageflags_fd;
188static int opt_hwpoison; 188static int opt_hwpoison;
189static int opt_unpoison; 189static int opt_unpoison;
190 190
191static char *hwpoison_debug_fs; 191static const char *hwpoison_debug_fs;
192static int hwpoison_inject_fd; 192static int hwpoison_inject_fd;
193static int hwpoison_forget_fd; 193static int hwpoison_forget_fd;
194 194
@@ -487,7 +487,7 @@ static void prepare_hwpoison_fd(void)
487{ 487{
488 char buf[MAX_PATH + 1]; 488 char buf[MAX_PATH + 1];
489 489
490 hwpoison_debug_fs = debugfs_mount(NULL); 490 hwpoison_debug_fs = debugfs__mount();
491 if (!hwpoison_debug_fs) { 491 if (!hwpoison_debug_fs) {
492 perror("mount debugfs"); 492 perror("mount debugfs");
493 exit(EXIT_FAILURE); 493 exit(EXIT_FAILURE);
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 48c6e1ac6827..b9d3a32cbc04 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -137,6 +137,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
137void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) 137void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
138{ 138{
139 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 139 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
140 bool phys_active;
141 int ret;
140 142
141 /* 143 /*
142 * We're about to run this vcpu again, so there is no need to 144 * We're about to run this vcpu again, so there is no need to
@@ -151,6 +153,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
151 */ 153 */
152 if (kvm_timer_should_fire(vcpu)) 154 if (kvm_timer_should_fire(vcpu))
153 kvm_timer_inject_irq(vcpu); 155 kvm_timer_inject_irq(vcpu);
156
157 /*
158	 * We keep track of whether the edge-triggered interrupt has been
159	 * signalled to the vgic/guest, and if so, we mask the interrupt in
160	 * the physical distributor to prevent the timer from raising a
161	 * physical interrupt whenever we run a guest, which would prevent
162	 * forward VCPU progress.
163 */
164 if (kvm_vgic_get_phys_irq_active(timer->map))
165 phys_active = true;
166 else
167 phys_active = false;
168
169 ret = irq_set_irqchip_state(timer->map->irq,
170 IRQCHIP_STATE_ACTIVE,
171 phys_active);
172 WARN_ON(ret);
154} 173}
155 174
156/** 175/**
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 6bd1c9bf7ae7..30489181922d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -531,6 +531,34 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
531 return false; 531 return false;
532} 532}
533 533
534/*
535 * If a mapped interrupt's state has been modified by the guest such that it
536 * is no longer active or pending, without it having gone through the sync path,
537 * then the map->active field must be cleared so the interrupt can be taken
538 * again.
539 */
540static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
541{
542 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
543 struct list_head *root;
544 struct irq_phys_map_entry *entry;
545 struct irq_phys_map *map;
546
547 rcu_read_lock();
548
549 /* Check for PPIs */
550 root = &vgic_cpu->irq_phys_map_list;
551 list_for_each_entry_rcu(entry, root, entry) {
552 map = &entry->map;
553
554 if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
555 !vgic_irq_is_active(vcpu, map->virt_irq))
556 map->active = false;
557 }
558
559 rcu_read_unlock();
560}
561
534bool vgic_handle_clear_pending_reg(struct kvm *kvm, 562bool vgic_handle_clear_pending_reg(struct kvm *kvm,
535 struct kvm_exit_mmio *mmio, 563 struct kvm_exit_mmio *mmio,
536 phys_addr_t offset, int vcpu_id) 564 phys_addr_t offset, int vcpu_id)
@@ -561,6 +589,7 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
561 vcpu_id, offset); 589 vcpu_id, offset);
562 vgic_reg_access(mmio, reg, offset, mode); 590 vgic_reg_access(mmio, reg, offset, mode);
563 591
592 vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
564 vgic_update_state(kvm); 593 vgic_update_state(kvm);
565 return true; 594 return true;
566 } 595 }
@@ -598,6 +627,7 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
598 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); 627 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
599 628
600 if (mmio->is_write) { 629 if (mmio->is_write) {
630 vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
601 vgic_update_state(kvm); 631 vgic_update_state(kvm);
602 return true; 632 return true;
603 } 633 }
@@ -982,6 +1012,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
982 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; 1012 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
983 pend_shared = vcpu->arch.vgic_cpu.pending_shared; 1013 pend_shared = vcpu->arch.vgic_cpu.pending_shared;
984 1014
1015 if (!dist->enabled) {
1016 bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
1017 bitmap_zero(pend_shared, nr_shared);
1018 return 0;
1019 }
1020
985 pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id); 1021 pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
986 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); 1022 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
987 bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); 1023 bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@ -1009,11 +1045,6 @@ void vgic_update_state(struct kvm *kvm)
1009 struct kvm_vcpu *vcpu; 1045 struct kvm_vcpu *vcpu;
1010 int c; 1046 int c;
1011 1047
1012 if (!dist->enabled) {
1013 set_bit(0, dist->irq_pending_on_cpu);
1014 return;
1015 }
1016
1017 kvm_for_each_vcpu(c, vcpu, kvm) { 1048 kvm_for_each_vcpu(c, vcpu, kvm) {
1018 if (compute_pending_for_cpu(vcpu)) 1049 if (compute_pending_for_cpu(vcpu))
1019 set_bit(c, dist->irq_pending_on_cpu); 1050 set_bit(c, dist->irq_pending_on_cpu);
@@ -1092,6 +1123,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
1092 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1123 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1093 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); 1124 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
1094 1125
1126 /*
1127 * We must transfer the pending state back to the distributor before
1128	 * retiring the LR, otherwise we may lose edge-triggered interrupts.
1129 */
1130 if (vlr.state & LR_STATE_PENDING) {
1131 vgic_dist_irq_set_pending(vcpu, irq);
1132 vlr.hwirq = 0;
1133 }
1134
1095 vlr.state = 0; 1135 vlr.state = 0;
1096 vgic_set_lr(vcpu, lr_nr, vlr); 1136 vgic_set_lr(vcpu, lr_nr, vlr);
1097 clear_bit(lr_nr, vgic_cpu->lr_used); 1137 clear_bit(lr_nr, vgic_cpu->lr_used);
@@ -1132,7 +1172,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
1132 kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state); 1172 kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
1133 vgic_irq_clear_active(vcpu, irq); 1173 vgic_irq_clear_active(vcpu, irq);
1134 vgic_update_state(vcpu->kvm); 1174 vgic_update_state(vcpu->kvm);
1135 } else if (vgic_dist_irq_is_pending(vcpu, irq)) { 1175 } else {
1176 WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
1136 vlr.state |= LR_STATE_PENDING; 1177 vlr.state |= LR_STATE_PENDING;
1137 kvm_debug("Set pending: 0x%x\n", vlr.state); 1178 kvm_debug("Set pending: 0x%x\n", vlr.state);
1138 } 1179 }
@@ -1240,7 +1281,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1240 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1281 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1241 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1282 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1242 unsigned long *pa_percpu, *pa_shared; 1283 unsigned long *pa_percpu, *pa_shared;
1243 int i, vcpu_id, lr, ret; 1284 int i, vcpu_id;
1244 int overflow = 0; 1285 int overflow = 0;
1245 int nr_shared = vgic_nr_shared_irqs(dist); 1286 int nr_shared = vgic_nr_shared_irqs(dist);
1246 1287
@@ -1295,31 +1336,6 @@ epilog:
1295 */ 1336 */
1296 clear_bit(vcpu_id, dist->irq_pending_on_cpu); 1337 clear_bit(vcpu_id, dist->irq_pending_on_cpu);
1297 } 1338 }
1298
1299 for (lr = 0; lr < vgic->nr_lr; lr++) {
1300 struct vgic_lr vlr;
1301
1302 if (!test_bit(lr, vgic_cpu->lr_used))
1303 continue;
1304
1305 vlr = vgic_get_lr(vcpu, lr);
1306
1307 /*
1308 * If we have a mapping, and the virtual interrupt is
1309 * presented to the guest (as pending or active), then we must
1310 * set the state to active in the physical world. See
1311 * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
1312 */
1313 if (vlr.state & LR_HW) {
1314 struct irq_phys_map *map;
1315 map = vgic_irq_map_search(vcpu, vlr.irq);
1316
1317 ret = irq_set_irqchip_state(map->irq,
1318 IRQCHIP_STATE_ACTIVE,
1319 true);
1320 WARN_ON(ret);
1321 }
1322 }
1323} 1339}
1324 1340
1325static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) 1341static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@ -1421,7 +1437,7 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
1421 return 0; 1437 return 0;
1422 1438
1423 map = vgic_irq_map_search(vcpu, vlr.irq); 1439 map = vgic_irq_map_search(vcpu, vlr.irq);
1424 BUG_ON(!map || !map->active); 1440 BUG_ON(!map);
1425 1441
1426 ret = irq_get_irqchip_state(map->irq, 1442 ret = irq_get_irqchip_state(map->irq,
1427 IRQCHIP_STATE_ACTIVE, 1443 IRQCHIP_STATE_ACTIVE,
@@ -1429,13 +1445,8 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
1429 1445
1430 WARN_ON(ret); 1446 WARN_ON(ret);
1431 1447
1432 if (map->active) { 1448 if (map->active)
1433 ret = irq_set_irqchip_state(map->irq,
1434 IRQCHIP_STATE_ACTIVE,
1435 false);
1436 WARN_ON(ret);
1437 return 0; 1449 return 0;
1438 }
1439 1450
1440 return 1; 1451 return 1;
1441} 1452}
@@ -1607,8 +1618,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
1607 } else { 1618 } else {
1608 if (level_triggered) { 1619 if (level_triggered) {
1609 vgic_dist_irq_clear_level(vcpu, irq_num); 1620 vgic_dist_irq_clear_level(vcpu, irq_num);
1610 if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) 1621 if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
1611 vgic_dist_irq_clear_pending(vcpu, irq_num); 1622 vgic_dist_irq_clear_pending(vcpu, irq_num);
1623 vgic_cpu_irq_clear(vcpu, irq_num);
1624 if (!compute_pending_for_cpu(vcpu))
1625 clear_bit(cpuid, dist->irq_pending_on_cpu);
1626 }
1612 } 1627 }
1613 1628
1614 ret = false; 1629 ret = false;
@@ -2122,7 +2137,7 @@ static int init_vgic_model(struct kvm *kvm, int type)
2122 case KVM_DEV_TYPE_ARM_VGIC_V2: 2137 case KVM_DEV_TYPE_ARM_VGIC_V2:
2123 vgic_v2_init_emulation(kvm); 2138 vgic_v2_init_emulation(kvm);
2124 break; 2139 break;
2125#ifdef CONFIG_ARM_GIC_V3 2140#ifdef CONFIG_KVM_ARM_VGIC_V3
2126 case KVM_DEV_TYPE_ARM_VGIC_V3: 2141 case KVM_DEV_TYPE_ARM_VGIC_V3:
2127 vgic_v3_init_emulation(kvm); 2142 vgic_v3_init_emulation(kvm);
2128 break; 2143 break;
@@ -2284,7 +2299,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
2284 block_size = KVM_VGIC_V2_CPU_SIZE; 2299 block_size = KVM_VGIC_V2_CPU_SIZE;
2285 alignment = SZ_4K; 2300 alignment = SZ_4K;
2286 break; 2301 break;
2287#ifdef CONFIG_ARM_GIC_V3 2302#ifdef CONFIG_KVM_ARM_VGIC_V3
2288 case KVM_VGIC_V3_ADDR_TYPE_DIST: 2303 case KVM_VGIC_V3_ADDR_TYPE_DIST:
2289 type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; 2304 type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
2290 addr_ptr = &vgic->vgic_dist_base; 2305 addr_ptr = &vgic->vgic_dist_base;